From 81d46daf930a1c31b265f5cf14694e45b1e47c0f Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Mon, 5 Mar 2018 19:14:50 +0100 Subject: [PATCH] reduce number of threads for uzfs (#47) --- include/sys/zfs_context.h | 2 +- lib/libzpool/taskq.c | 7 +++++-- module/zfs/arc.c | 4 ++-- module/zfs/dsl_pool.c | 4 ++-- module/zfs/spa.c | 6 +++--- module/zfs/txg.c | 18 +++++++++--------- module/zfs/vdev_file.c | 5 +++-- 7 files changed, 25 insertions(+), 21 deletions(-) diff --git a/include/sys/zfs_context.h b/include/sys/zfs_context.h index 62e0c45c4663..a9201e787582 100644 --- a/include/sys/zfs_context.h +++ b/include/sys/zfs_context.h @@ -654,7 +654,7 @@ extern void delay(clock_t ticks); } while (0); #define max_ncpus 64 -#define boot_ncpus (sysconf(_SC_NPROCESSORS_ONLN)) +#define boot_ncpus (MIN(sysconf(_SC_NPROCESSORS_ONLN), max_ncpus)) /* * Process priorities as defined by setpriority(2) and getpriority(2). diff --git a/lib/libzpool/taskq.c b/lib/libzpool/taskq.c index d14f53028435..02a16e3f4ee6 100644 --- a/lib/libzpool/taskq.c +++ b/lib/libzpool/taskq.c @@ -29,6 +29,7 @@ */ #include +#include int taskq_now; taskq_t *system_taskq; @@ -219,6 +220,8 @@ taskq_thread(void *arg) taskq_ent_t *t; boolean_t prealloc; + prctl(PR_SET_NAME, tq->tq_name, 0, 0, 0); + mutex_enter(&tq->tq_lock); while (tq->tq_flags & TASKQ_ACTIVE) { if ((t = tq->tq_task.tqent_next) == &tq->tq_task) { @@ -359,8 +362,8 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id) void system_taskq_init(void) { - system_taskq = taskq_create("system_taskq", 64, maxclsyspri, 4, 512, - TASKQ_DYNAMIC | TASKQ_PREPOPULATE); + system_taskq = taskq_create("system_taskq", boot_ncpus, maxclsyspri, + 4, 512, TASKQ_DYNAMIC | TASKQ_PREPOPULATE); system_delay_taskq = taskq_create("delay_taskq", 4, maxclsyspri, 4, 512, TASKQ_DYNAMIC | TASKQ_PREPOPULATE); } diff --git a/module/zfs/arc.c b/module/zfs/arc.c index 2b0a78d4be47..287e07ed0bdf 100644 --- a/module/zfs/arc.c +++ b/module/zfs/arc.c @@ -6589,8 +6589,8 @@ arc_init(void) 
offsetof(arc_prune_t, p_node)); mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL); - arc_prune_taskq = taskq_create("arc_prune", max_ncpus, defclsyspri, - max_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC); + arc_prune_taskq = taskq_create("arc_prune", boot_ncpus, defclsyspri, + boot_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC); arc_reclaim_thread_exit = B_FALSE; diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c index c16708048cc5..964b423fff83 100644 --- a/module/zfs/dsl_pool.c +++ b/module/zfs/dsl_pool.c @@ -179,8 +179,8 @@ dsl_pool_open_impl(spa_t *spa, uint64_t txg) mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL); cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL); - dp->dp_iput_taskq = taskq_create("z_iput", max_ncpus, defclsyspri, - max_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC); + dp->dp_iput_taskq = taskq_create("z_iput", boot_ncpus, defclsyspri, + boot_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC); return (dp); } diff --git a/module/zfs/spa.c b/module/zfs/spa.c index 771f4c8d18ae..06d958c77d98 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -143,9 +143,9 @@ static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = { const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = { /* ISSUE ISSUE_HIGH INTR INTR_HIGH */ { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */ - { ZTI_N(8), ZTI_NULL, ZTI_P(12, 8), ZTI_NULL }, /* READ */ - { ZTI_BATCH, ZTI_N(5), ZTI_P(12, 8), ZTI_N(5) }, /* WRITE */ - { ZTI_P(12, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */ + { ZTI_N(4), ZTI_NULL, ZTI_BATCH, ZTI_NULL }, /* READ */ + { ZTI_BATCH, ZTI_N(3), ZTI_ONE, ZTI_ONE }, /* WRITE */ + { ZTI_P(2, 4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */ { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */ { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */ }; diff --git a/module/zfs/txg.c b/module/zfs/txg.c index 65bd7f93acdf..aa840ddef543 100644 --- a/module/zfs/txg.c +++ b/module/zfs/txg.c @@ -123,9 +123,9 @@ 
txg_init(dsl_pool_t *dp, uint64_t txg) int c; bzero(tx, sizeof (tx_state_t)); - tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP); + tx->tx_cpu = vmem_zalloc(boot_ncpus * sizeof (tx_cpu_t), KM_SLEEP); - for (c = 0; c < max_ncpus; c++) { + for (c = 0; c < boot_ncpus; c++) { int i; mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL); @@ -170,7 +170,7 @@ txg_fini(dsl_pool_t *dp) cv_destroy(&tx->tx_quiesce_done_cv); cv_destroy(&tx->tx_exit_cv); - for (c = 0; c < max_ncpus; c++) { + for (c = 0; c < boot_ncpus; c++) { int i; mutex_destroy(&tx->tx_cpu[c].tc_open_lock); @@ -184,7 +184,7 @@ txg_fini(dsl_pool_t *dp) if (tx->tx_commit_cb_taskq != NULL) taskq_destroy(tx->tx_commit_cb_taskq); - vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t)); + vmem_free(tx->tx_cpu, boot_ncpus * sizeof (tx_cpu_t)); bzero(tx, sizeof (tx_state_t)); } @@ -373,7 +373,7 @@ txg_quiesce(dsl_pool_t *dp, uint64_t txg) /* * Grab all tc_open_locks so nobody else can get into this txg. */ - for (c = 0; c < max_ncpus; c++) + for (c = 0; c < boot_ncpus; c++) mutex_enter(&tx->tx_cpu[c].tc_open_lock); ASSERT(txg == tx->tx_open_txg); @@ -387,7 +387,7 @@ txg_quiesce(dsl_pool_t *dp, uint64_t txg) * Now that we've incremented tx_open_txg, we can let threads * enter the next transaction group. */ - for (c = 0; c < max_ncpus; c++) + for (c = 0; c < boot_ncpus; c++) mutex_exit(&tx->tx_cpu[c].tc_open_lock); spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time); @@ -396,7 +396,7 @@ txg_quiesce(dsl_pool_t *dp, uint64_t txg) /* * Quiesce the transaction group by waiting for everyone to txg_exit(). 
*/ - for (c = 0; c < max_ncpus; c++) { + for (c = 0; c < boot_ncpus; c++) { tx_cpu_t *tc = &tx->tx_cpu[c]; mutex_enter(&tc->tc_lock); while (tc->tc_count[g] != 0) @@ -430,7 +430,7 @@ txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg) tx_state_t *tx = &dp->dp_tx; list_t *cb_list; - for (c = 0; c < max_ncpus; c++) { + for (c = 0; c < boot_ncpus; c++) { tx_cpu_t *tc = &tx->tx_cpu[c]; /* * No need to lock tx_cpu_t at this point, since this can @@ -447,7 +447,7 @@ txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg) * Commit callback taskq hasn't been created yet. */ tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb", - max_ncpus, defclsyspri, max_ncpus, max_ncpus * 2, + boot_ncpus, defclsyspri, boot_ncpus, boot_ncpus * 2, TASKQ_PREPOPULATE | TASKQ_DYNAMIC); } diff --git a/module/zfs/vdev_file.c b/module/zfs/vdev_file.c index 663757b69c66..a2f3b478c977 100644 --- a/module/zfs/vdev_file.c +++ b/module/zfs/vdev_file.c @@ -260,8 +260,9 @@ vdev_ops_t vdev_file_ops = { void vdev_file_init(void) { - vdev_file_taskq = taskq_create("z_vdev_file", MAX(boot_ncpus, 16), - minclsyspri, boot_ncpus, INT_MAX, TASKQ_DYNAMIC); + /* file backend is used for testing so we can save some threads here */ + vdev_file_taskq = taskq_create("z_vdev_file", 1, + minclsyspri, MIN(boot_ncpus, 4), INT_MAX, TASKQ_DYNAMIC); VERIFY(vdev_file_taskq); }