Introduce a DLM-based implementation of the cluster ops for backstore
devices. Use one DLM lockspace per device; the device is identified by
the concatenation of its T10 model and T10 unit_serial.

Use a DLM CKV lock for COMPARE AND WRITE in cluster mode, one lock per
LBA, which allows parallel execution of CAW commands on different LBAs.

The user should set configfs/target/cluster/dlm/cluster_name identical
to configfs/dlm/cluster/cluster_name.

Signed-off-by: Dmitry Bogdanov <d.bogdanov@xxxxxxxxx>
---
 drivers/target/Makefile             |   1 +
 drivers/target/target_cluster_dlm.c | 175 ++++++++++++++++++++++++++++
 2 files changed, 176 insertions(+)
 create mode 100644 drivers/target/target_cluster_dlm.c

diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index be4d1bfcf79a..16b625108eec 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -33,3 +33,4 @@ obj-$(CONFIG_SBP_TARGET) += sbp/
 obj-$(CONFIG_REMOTE_TARGET) += tcm_remote/
 
 obj-$(CONFIG_DLM_CKV) += dlm_ckv.o
+obj-$(CONFIG_DLM_CKV) += target_cluster_dlm.o
diff --git a/drivers/target/target_cluster_dlm.c b/drivers/target/target_cluster_dlm.c
new file mode 100644
index 000000000000..2edae188811d
--- /dev/null
+++ b/drivers/target/target_cluster_dlm.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/dlm.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/configfs.h>
+#include <target/target_core_base.h>
+
+#include "dlm_ckv.h"
+
+struct target_cluster_data {
+	struct dlm_ckv_bucket *bucket;
+	u32 local_nodeid;
+};
+
+struct target_lksb {
+	struct dlm_lksb lksb;
+	struct completion compl;
+};
+
+struct target_cluster_config {
+	struct config_group cg_group;
+	char cluster_name[DLM_LOCKSPACE_LEN];
+};
+
+static struct target_cluster_config *to_cfg(struct config_item *i)
+{
+	return i ? container_of(to_config_group(i), struct target_cluster_config, cg_group) : NULL;
+}
+
+static ssize_t target_cluster_cluster_name_show(struct config_item *item, char *buf)
+{
+	return sprintf(buf, "%s\n", to_cfg(item)->cluster_name);
+}
+
+static ssize_t target_cluster_cluster_name_store(struct config_item *item,
+						 const char *page,
+						 size_t len)
+{
+	struct target_cluster_config *cfg = to_cfg(item);
+
+	strscpy(cfg->cluster_name, page, DLM_LOCKSPACE_LEN);
+	if (*cfg->cluster_name && cfg->cluster_name[strlen(cfg->cluster_name) - 1] == '\n')
+		cfg->cluster_name[strlen(cfg->cluster_name) - 1] = 0;
+
+	return len;
+}
+
+CONFIGFS_ATTR(target_cluster_, cluster_name);
+
+static struct configfs_attribute *cluster_attrs[] = {
+	&target_cluster_attr_cluster_name,
+	NULL,
+};
+
+static const struct config_item_type ci_cluster_config = {
+	.ct_item_ops = NULL,
+	.ct_attrs = cluster_attrs,
+	.ct_owner = THIS_MODULE,
+};
+
+static struct target_cluster_config cluster_cfg;
+
+static int target_init_dlm(struct se_device *dev)
+{
+	struct target_cluster_data *cluster_data;
+	char ls_name[INQUIRY_MODEL_LEN + INQUIRY_VPD_SERIAL_LEN + 1];
+	int err = 0;
+
+	BUG_ON(dev->cluster_data);
+
+	snprintf(ls_name, sizeof(ls_name), "%s%s",
+		 dev->t10_wwn.model, dev->t10_wwn.unit_serial);
+
+	cluster_data = kzalloc(sizeof(*cluster_data), GFP_KERNEL);
+	if (!cluster_data)
+		return -ENOMEM;
+
+	cluster_data->bucket = dlm_ckv_open_bucket(ls_name,
+						   cluster_cfg.cluster_name,
+						   dev);
+	if (!cluster_data->bucket) {
+		err = -EIO;
+		kfree(cluster_data);
+		dev->cluster_data = NULL;
+		return err;
+	}
+	dev->cluster_data = cluster_data;
+
+	return err;
+}
+
+static int target_cleanup_dlm(struct se_device *dev)
+{
+	struct target_cluster_data *cluster_data = dev->cluster_data;
+	int res;
+
+	res = dlm_ckv_close_bucket(cluster_data->bucket);
+	if (res)
+		pr_err("TARGET_CORE[%d]: closing bucket failed: %d\n",
+		       dev->dev_index, res);
+
+	kfree(dev->cluster_data);
+	dev->cluster_data = NULL;
+
+	return 0;
+}
+
+static void *target_caw_lock_dlm(struct se_device *dev, u64 lba)
+{
+	struct target_cluster_data *cluster_data = dev->cluster_data;
+	struct dlm_ckv_lock *caw_lock;
+	char name[DLM_RESNAME_MAXLEN];
+	int res = -ENOMEM;
+
+	snprintf(name, sizeof(name) - 1, "caw_lba_%llx", lba);
+
+	caw_lock = dlm_ckv_create_lock(cluster_data->bucket, name);
+	if (!caw_lock)
+		goto done;
+
+	res = dlm_ckv_lock_get(caw_lock);
+	if (res) {
+		dlm_ckv_free_lock(caw_lock);
+		caw_lock = NULL;
+		pr_warn("target_dlm_lock_wait failed with %d\n", res);
+	}
+
+done:
+	return caw_lock;
+}
+
+static int target_caw_unlock_dlm(struct se_device *dev, void *lock)
+{
+	struct dlm_ckv_lock *caw_lock = lock;
+	int res;
+
+	res = dlm_ckv_lock_release(caw_lock);
+
+	dlm_ckv_free_lock(caw_lock);
+
+	return res;
+}
+
+struct target_cluster_ops dlm_cluster_ops = {
+	.name = "dlm",
+	.owner = THIS_MODULE,
+	.cg_group = &cluster_cfg.cg_group,
+
+	.init = target_init_dlm,
+	.cleanup = target_cleanup_dlm,
+	.caw_lock = target_caw_lock_dlm,
+	.caw_unlock = target_caw_unlock_dlm,
+};
+
+static int __init target_cluster_dlm_module_init(void)
+{
+	config_group_init_type_name(&cluster_cfg.cg_group,
+				    dlm_cluster_ops.name,
+				    &ci_cluster_config);
+
+	return target_cluster_impl_register(&dlm_cluster_ops);
+}
+
+static void __exit target_cluster_dlm_module_exit(void)
+{
+	target_cluster_impl_unregister(&dlm_cluster_ops);
+}
+
+MODULE_DESCRIPTION("TCM Cluster implementation over DLM");
+MODULE_AUTHOR("Dmitry Bogdanov <d.bogdanov@xxxxxxxxx>");
+MODULE_LICENSE("GPL");
+
+module_init(target_cluster_dlm_module_init);
+module_exit(target_cluster_dlm_module_exit);
-- 
2.25.1
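
Not part of the patch itself, just an illustrative sketch of the intended
call pattern: a COMPARE AND WRITE handler brackets its read-compare-write
sequence with the per-LBA lock exposed by these ops, so CAWs on the same
LBA are serialized across nodes while CAWs on different LBAs run in
parallel. The dev->cluster_ops pointer and the sbc_caw_cluster_locked()
wrapper below are assumptions made for the example; only
struct target_cluster_ops and its caw_lock()/caw_unlock() callbacks come
from this series.

/*
 * Illustrative only (not in this patch): take the per-LBA cluster lock
 * around a COMPARE AND WRITE sequence.  Assumes dev->cluster_ops points
 * at dlm_cluster_ops when the "dlm" cluster implementation is active.
 */
static sense_reason_t sbc_caw_cluster_locked(struct se_cmd *cmd, u64 lba,
					     sense_reason_t (*do_caw)(struct se_cmd *))
{
	struct se_device *dev = cmd->se_dev;
	void *lock = NULL;
	sense_reason_t ret;

	if (dev->cluster_ops && dev->cluster_ops->caw_lock) {
		/* Maps to one DLM CKV lock named "caw_lba_<lba>". */
		lock = dev->cluster_ops->caw_lock(dev, lba);
		if (!lock)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	ret = do_caw(cmd);	/* read, compare, conditionally write */

	if (lock)
		dev->cluster_ops->caw_unlock(dev, lock);

	return ret;
}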