From: Alexander Boyko <alexander.boyko@xxxxxxx> Current implementation of qos allocation is called for every statfs update. It takes lq_rw_sem for write and recalculates penalties, even with setting qos_threshold_rr=100. This means RR allocation is always used. Let's skip unnecessary locking and calculation for 100% round robin allocation. HPE-bug-id: LUS-10388 WC-bug-id: https://jira.whamcloud.com/browse/LU-15393 Lustre-commit: 2f23140d5c1396fd0 ("LU-15393 lod: skip qos for qos_threshold_rr=100") Signed-off-by: Alexander Boyko <alexander.boyko@xxxxxxx> Reviewed-on: https://review.whamcloud.com/46388 Reviewed-by: Andrew Perepechko <andrew.perepechko@xxxxxxx> Reviewed-by: Alexey Lyashkov <alexey.lyashkov@xxxxxxx> Reviewed-by: Andreas Dilger <adilger@xxxxxxxxxxxxx> Reviewed-by: Oleg Drokin <green@xxxxxxxxxxxxx> Signed-off-by: James Simmons <jsimmons@xxxxxxxxxxxxx> --- fs/lustre/include/lu_object.h | 1 + fs/lustre/lmv/lproc_lmv.c | 5 +++-- fs/lustre/obdclass/lu_tgt_descs.c | 12 ++++++++---- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/fs/lustre/include/lu_object.h b/fs/lustre/include/lu_object.h index 3fb40c6..e4dd287c5 100644 --- a/fs/lustre/include/lu_object.h +++ b/fs/lustre/include/lu_object.h @@ -1503,6 +1503,7 @@ struct lu_tgt_desc_idx { }; /* QoS data for LOD/LMV */ +#define QOS_THRESHOLD_MAX 256 /* should be power of two */ struct lu_qos { struct list_head lq_svr_list; /* lu_svr_qos list */ struct rw_semaphore lq_rw_sem; diff --git a/fs/lustre/lmv/lproc_lmv.c b/fs/lustre/lmv/lproc_lmv.c index b9efae9..6d4e8d9 100644 --- a/fs/lustre/lmv/lproc_lmv.c +++ b/fs/lustre/lmv/lproc_lmv.c @@ -158,7 +158,8 @@ static ssize_t qos_threshold_rr_show(struct kobject *kobj, obd_kset.kobj); return scnprintf(buf, PAGE_SIZE, "%u%%\n", - (obd->u.lmv.lmv_qos.lq_threshold_rr * 100 + 255) >> 8); + (obd->u.lmv.lmv_qos.lq_threshold_rr * 100 + + (QOS_THRESHOLD_MAX - 1)) / QOS_THRESHOLD_MAX); } static ssize_t qos_threshold_rr_store(struct kobject *kobj, @@ -190,7 +191,7 @@ 
static ssize_t qos_threshold_rr_store(struct kobject *kobj, if (val > 100) return -EINVAL; - lmv->lmv_qos.lq_threshold_rr = (val << 8) / 100; + lmv->lmv_qos.lq_threshold_rr = (val * QOS_THRESHOLD_MAX) / 100; set_bit(LQ_DIRTY, &lmv->lmv_qos.lq_flags); return count; diff --git a/fs/lustre/obdclass/lu_tgt_descs.c b/fs/lustre/obdclass/lu_tgt_descs.c index 935cff6..51d2e21 100644 --- a/fs/lustre/obdclass/lu_tgt_descs.c +++ b/fs/lustre/obdclass/lu_tgt_descs.c @@ -275,11 +275,13 @@ int lu_tgt_descs_init(struct lu_tgt_descs *ltd, bool is_mdt) ltd->ltd_lmv_desc.ld_pattern = LMV_HASH_TYPE_DEFAULT; ltd->ltd_qos.lq_prio_free = LMV_QOS_DEF_PRIO_FREE * 256 / 100; ltd->ltd_qos.lq_threshold_rr = - LMV_QOS_DEF_THRESHOLD_RR_PCT * 256 / 100; + LMV_QOS_DEF_THRESHOLD_RR_PCT * + QOS_THRESHOLD_MAX / 100; } else { ltd->ltd_qos.lq_prio_free = LOV_QOS_DEF_PRIO_FREE * 256 / 100; ltd->ltd_qos.lq_threshold_rr = - LOV_QOS_DEF_THRESHOLD_RR_PCT * 256 / 100; + LOV_QOS_DEF_THRESHOLD_RR_PCT * + QOS_THRESHOLD_MAX / 100; } return 0; @@ -568,8 +570,10 @@ int ltd_qos_penalties_calc(struct lu_tgt_descs *ltd) * creation performance */ clear_bit(LQ_SAME_SPACE, &qos->lq_flags); - if ((ba_max * (256 - qos->lq_threshold_rr)) >> 8 < ba_min && - (ia_max * (256 - qos->lq_threshold_rr)) >> 8 < ia_min) { + if (((ba_max * (QOS_THRESHOLD_MAX - qos->lq_threshold_rr)) / + QOS_THRESHOLD_MAX) < ba_min && + ((ia_max * (QOS_THRESHOLD_MAX - qos->lq_threshold_rr)) / + QOS_THRESHOLD_MAX) < ia_min) { set_bit(LQ_SAME_SPACE, &qos->lq_flags); /* Reset weights for the next time we enter qos mode */ set_bit(LQ_RESET, &qos->lq_flags); -- 1.8.3.1