From: "M. Mohan Kumar" <mohan@xxxxxxxxxx> BD xlator: CLI command to create a new LV device A new cli command is added to create an LV device. The following command creates an lv in a given vg. # gluster bd create <volname>:<vgname>/<lvname> The list of all logical volumes in the given Volume Group is constructed when the BD xlator is loaded initially. So when an LV is created outside the BD xlator code, the new LV is not updated in the BD xlator's internal data structure. I am looking to use the xlator notify functionality, so that the CLI will invoke the bd xlator's notify function with the required parameters such as the name of the new LV device, its size, etc. The BD xlator will then create a new LV and, as part of that, this new LV's information will be updated in its internal data structure. The issue with this code is that glusterfs_handle_rpc_message is not invoked. I could track these functions returning 0. glusterd_submit_request -> rpc_clnt_submit -> rpc_transport_submit_request -> socket_submit_request But glusterd3_1_brick_op_cbk gets rsp.op_ret as -2. Is it the right approach to use the notify functionality of xlators? Or is there any other approach by which I can 'signal' an xlator about some out-of-band change that happened? Signed-off-by: M. 
Mohan Kumar <mohan@xxxxxxxxxx> --- cli/src/Makefile.am | 3 + cli/src/cli-cmd-volume-bdevice.c | 243 ++++++++++++++++++++++++++ cli/src/cli-cmd.c | 5 + cli/src/cli-cmd.h | 4 + cli/src/cli-rpc-ops.c | 113 ++++++++++++- rpc/rpc-lib/src/protocol-common.h | 3 + xlators/mgmt/glusterd/src/glusterd-handler.c | 91 ++++++++++ xlators/mgmt/glusterd/src/glusterd-op-sm.c | 88 +++++++++- xlators/mgmt/glusterd/src/glusterd.h | 1 + 9 files changed, 547 insertions(+), 4 deletions(-) create mode 100644 cli/src/cli-cmd-volume-bdevice.c diff --git a/cli/src/Makefile.am b/cli/src/Makefile.am index 8002836..d3dfc95 100644 --- a/cli/src/Makefile.am +++ b/cli/src/Makefile.am @@ -3,6 +3,9 @@ sbin_PROGRAMS = gluster gluster_SOURCES = cli.c registry.c input.c cli-cmd.c cli-rl.c \ cli-cmd-volume.c cli-cmd-peer.c cli-rpc-ops.c cli-cmd-parser.c\ cli-cmd-system.c cli-cmd-misc.c cli-xml-output.c +if ENABLE_BD_XLATOR +gluster_SOURCES += cli-cmd-volume-bdevice.c +endif gluster_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(GF_LDADD)\ $(RLLIBS) $(top_builddir)/rpc/xdr/src/libgfxdr.la \ diff --git a/cli/src/cli-cmd-volume-bdevice.c b/cli/src/cli-cmd-volume-bdevice.c new file mode 100644 index 0000000..0019e6d --- /dev/null +++ b/cli/src/cli-cmd-volume-bdevice.c @@ -0,0 +1,243 @@ +/* + GlusterFS is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3 of the License, + or (at your option) any later version. + + GlusterFS is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see + <http://www.gnu.org/licenses/>. 
+*/ + +#include <stdio.h> +#include <string.h> +#include <stdlib.h> +#include <stdint.h> +#include <pthread.h> + +#include <sys/socket.h> +#include <netdb.h> +#include <sys/types.h> +#include <netinet/in.h> + +#ifndef _CONFIG_H +#define _CONFIG_H +#include "config.h" +#endif + +#include "cli.h" +#include "cli-cmd.h" +#include "cli-mem-types.h" +#include "cli1-xdr.h" +#include "run.h" + +extern struct rpc_clnt *global_rpc; + +extern rpc_clnt_prog_t *cli_rpc_prog; + +int32_t validate_volname(char *volname) +{ + int i; + + if (volname[0] == '-') + goto out; + + if (!strcmp (volname, "all")) { + cli_err ("\"all\" cannot be the name of a volume."); + goto out; + } + + if (strchr (volname, '/')) + goto out; + + if (strlen (volname) > 512) + goto out; + + for (i = 0; i < strlen (volname); i++) + if (!isalnum (volname[i]) && (volname[i] != '_') && (volname[i] != '-')) + goto out; + return 0; +out: + return -1; +} + +#define KB (1024) +#define MB (KB * KB) +#define GB (MB * KB) + +int32_t +cli_cmd_bd_parse_create (dict_t *dict, const char **words) +{ + char *volname = NULL; + char *buff = NULL; + uint64_t size = 0; + char *endptr = NULL; + char *path = NULL; + int ret = -1; + + if (!strchr(words[2], ':')) + goto out; + + buff = gf_strdup (words[2]); + + volname = strtok (buff, ":"); + if (!volname) + goto out; + if (validate_volname (volname) < 0) + goto out; + + path = strtok (NULL, ":"); + if (!path) + goto out; + + buff = (char *)words[3]; + size = strtoull (buff, &endptr, 10); + if (buff == endptr || errno != 0) + return -1; + + if (!strcasecmp (endptr, "g") || !strcasecmp (endptr, "gb")) + size *= GB; + else if (!strcasecmp (endptr, "m") || !strcasecmp (endptr, "mb")) + size *= MB; + else if (!strcasecmp (endptr, "k") || !strcasecmp (endptr, "kb")) + size *= KB; + else if (strcasecmp (endptr, "")) + return -1; + + ret = dict_set_str (dict, "volname", volname); + if (ret) + goto out; + + ret = dict_set_str (dict, "path", path); + if (ret) + goto out; + + ret = 
dict_set_uint64 (dict, "size", size); + if (ret) + goto out; + + ret = 0; +out: + return ret; +} + + +int32_t +cli_cmd_bd_parse (const char **words, int wordcount, dict_t **options) +{ + dict_t *dict = NULL; + int ret = -1; + char *op[] = { "create", "clone", "snapshot", NULL }; + int index = 0; + + for (index = 0; op[index]; index++) + if (!strcasecmp (words[1], op[index])) + break; + + if (!op[index]) + return -1; + + dict = dict_new (); + if (!dict) + goto out; + + if (!strcasecmp (words[1], "create")) { + if (wordcount != 4) + goto out; + ret = cli_cmd_bd_parse_create (dict, words); + } else { + ret = -1; + goto out; + + } + + *options = dict; + ret = 0; +out: + if (ret) + dict_unref (dict); + return ret; +} + +int +cli_cmd_bd_create_cbk (struct cli_state *state, struct cli_cmd_word *word, + const char **words, int wordcount) +{ + int ret = -1; + rpc_clnt_procedure_t *proc = NULL; + call_frame_t *frame = NULL; + int sent = 0; + int parse_error = 0; + dict_t *options = NULL; + + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_BD_CREATE]; + + frame = create_frame (THIS, THIS->ctx->pool); + if (!frame) + goto out; + + ret = cli_cmd_bd_parse (words, wordcount, &options); + if (ret) { + cli_usage_out (word->pattern); + parse_error = 1; + goto out; + } + + if (proc->fn) { + ret = proc->fn (frame, THIS, options); + } + +out: + if (options) + dict_unref (options); + + if (ret) { + cli_cmd_sent_status_get (&sent); + if ((sent == 0) && (parse_error == 0)) + cli_out ("Creating BD failed!"); + } + + CLI_STACK_DESTROY (frame); + + return ret; +} + +struct cli_cmd bd_cmds[] = { + { "bd create <volname>:<bd>", + cli_cmd_bd_create_cbk, + "create a block device"}, + { NULL, NULL, NULL } +}; + +int +cli_cmd_bd_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word, + const char **words, int wordcount) +{ + struct cli_cmd *cmd = NULL; + + for (cmd = bd_cmds; cmd->pattern; cmd++) + if (_gf_false == cmd->disable) + cli_out ("%s - %s", cmd->pattern, cmd->desc); + + return 0; +} 
+ +int +cli_cmd_bd_register (struct cli_state *state) +{ + int ret = 0; + struct cli_cmd *cmd = NULL; + + for (cmd = bd_cmds; cmd->pattern; cmd++) { + ret = cli_cmd_register (&state->tree, cmd); + if (ret) + goto out; + } +out: + return ret; +} diff --git a/cli/src/cli-cmd.c b/cli/src/cli-cmd.c index f2b434a..9eedee1 100644 --- a/cli/src/cli-cmd.c +++ b/cli/src/cli-cmd.c @@ -242,6 +242,11 @@ cli_cmds_register (struct cli_state *state) if (ret) goto out; +#ifdef HAVE_BD_XLATOR + ret = cli_cmd_bd_register (state); + if (ret) + goto out; +#endif out: return ret; } diff --git a/cli/src/cli-cmd.h b/cli/src/cli-cmd.h index ba877e2..26f41c6 100644 --- a/cli/src/cli-cmd.h +++ b/cli/src/cli-cmd.h @@ -115,4 +115,8 @@ cli_cmd_submit (void *req, call_frame_t *frame, gf_answer_t cli_cmd_get_confirmation (struct cli_state *state, const char *question); int cli_cmd_sent_status_get (int *status); + +#ifdef HAVE_BD_XLATOR +int cli_cmd_bd_register (struct cli_state *state); +#endif #endif /* __CLI_CMD_H__ */ diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c index 9521174..1b8fc93 100644 --- a/cli/src/cli-rpc-ops.c +++ b/cli/src/cli-rpc-ops.c @@ -77,7 +77,6 @@ int32_t gf_cli3_1_get_volume (call_frame_t *frame, xlator_t *this, void *data); - rpc_clnt_prog_t cli_handshake_prog = { .progname = "cli handshake", .prognum = GLUSTER_HNDSK_PROGRAM, @@ -2511,6 +2510,115 @@ out: return ret; } +#ifdef HAVE_BD_XLATOR +int +gf_cli3_1_bd_create_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf_cli_rsp rsp = {0,}; + int ret = -1; + cli_local_t *local = NULL; + char *path = NULL; + dict_t *dict = NULL; + + if (-1 == req->rpc_status) { + goto out; + } + + local = ((call_frame_t *) (myframe))->local; + ((call_frame_t *) (myframe))->local = NULL; + + ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + dict = local->dict; + ret = dict_get_str (dict, "path", &path); + if (ret) + 
goto out; + + gf_log ("cli", GF_LOG_INFO, "Received resp to create bd"); + +#if (HAVE_LIB_XML) + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_dict ("BdCreate", dict, rsp.op_ret, + rsp.op_errno, rsp.op_errstr); + if (ret) + gf_log ("cli", GF_LOG_ERROR, + "Error outputting to xml"); + goto out; + } +#endif + + if (rsp.op_ret && strcmp (rsp.op_errstr, "")) + cli_err ("%s", rsp.op_errstr); + else + cli_out ("Creation of %s has been %s", path, + (rsp.op_ret) ? "unsuccessful": + "successful."); + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + if (dict) + dict_unref (dict); + if (local) + cli_local_wipe (local); + if (rsp.dict.dict_val) + free (rsp.dict.dict_val); + if (rsp.op_errstr) + free (rsp.op_errstr); + return ret; +} + +int32_t +gf_cli3_1_bd_create (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf_cli_req req = {{0,}}; + int ret = 0; + dict_t *dict = NULL; + cli_local_t *local = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = dict_ref ((dict_t *)data); + if (!dict) + goto out; + + ret = dict_allocate_and_serialize (dict, + &req.dict.dict_val, + (size_t *)&req.dict.dict_len); + + local = cli_local_get (); + + if (local) { + local->dict = dict_ref (dict); + frame->local = local; + } + + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_BD_CREATE, NULL, + this, gf_cli3_1_bd_create_cbk, + (xdrproc_t) xdr_gf_cli_req); + +out: + if (dict) + dict_unref (dict); + + if (req.dict.dict_val) + GF_FREE (req.dict.dict_val); + + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} +#endif int32_t gf_cli3_1_create_volume (call_frame_t *frame, xlator_t *this, @@ -6402,6 +6510,9 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = { [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", gf_cli3_1_statedump_volume}, [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", gf_cli3_1_list_volume}, [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", 
gf_cli3_1_clearlocks_volume}, +#ifdef HAVE_BD_XLATOR + [GLUSTER_CLI_BD_CREATE] = {"BD_CREATE", gf_cli3_1_bd_create}, +#endif }; struct rpc_clnt_program cli_prog = { diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h index e2815d8..2893d2f 100644 --- a/rpc/rpc-lib/src/protocol-common.h +++ b/rpc/rpc-lib/src/protocol-common.h @@ -150,6 +150,9 @@ enum gluster_cli_procnum { GLUSTER_CLI_STATEDUMP_VOLUME, GLUSTER_CLI_LIST_VOLUME, GLUSTER_CLI_CLRLOCKS_VOLUME, +#if HAVE_BD_XLATOR + GLUSTER_CLI_BD_CREATE, +#endif GLUSTER_CLI_MAXVALUE, }; diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c index 51b8598..894ef86 100644 --- a/xlators/mgmt/glusterd/src/glusterd-handler.c +++ b/xlators/mgmt/glusterd/src/glusterd-handler.c @@ -60,6 +60,10 @@ #include "globals.h" #include "glusterd-syncop.h" +#ifdef HAVE_BD_XLATOR +#include <lvm2app.h> +#endif + static int glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid, char *hostname, int port, @@ -895,6 +899,90 @@ out: return ret; } +#ifdef HAVE_BD_XLATOR +int +glusterd_handle_cli_bd_create (rpcsvc_request_t *req) +{ + int32_t ret = -1; + gf_cli_req cli_req = {{0,}}; + dict_t *dict = NULL; + char *volname = NULL; + uint64_t size = 0; + char *path = NULL; + char *op_errstr = NULL; + glusterd_op_t cli_op = GD_OP_NEW_LV; + + GF_ASSERT (req); + + if (!xdr_to_generic (req->msg[0], &cli_req, + (xdrproc_t)xdr_gf_cli_req)) { + //failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + gf_log ("glusterd", GF_LOG_INFO, "Received get bd create req"); + + if (cli_req.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new (); + + ret = dict_unserialize (cli_req.dict.dict_val, + cli_req.dict.dict_len, + &dict); + if (ret < 0) { + gf_log ("glusterd", GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + goto out; + } else { + dict->extra_stdfree = cli_req.dict.dict_val; + } + } + + ret = dict_get_str (dict, 
"volname", &volname); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, + "failed to get volname"); + goto out; + } + + ret = dict_get_str (dict, "path", &path); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, + "failed to get path"); + goto out; + } + + ret = dict_get_uint64 (dict, "size", &size); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, + "failed to get size"); + goto out; + } + + ret = glusterd_op_begin (req, GD_OP_NEW_LV, dict); + gf_cmd_log ("bd create: %s", ((ret == 0) ? "SUCCESS": "FAILED")); +out: + if (ret && dict) + dict_unref (dict); + + glusterd_friend_sm (); + glusterd_op_sm (); + + if (ret) { + if (!op_errstr) + op_errstr = gf_strdup ("operation failed"); + ret = glusterd_op_send_cli_response (cli_op, ret, 0, + req, NULL, op_errstr); + GF_FREE (op_errstr); + } + + + return ret; +} +#endif + int glusterd_handle_cli_list_volume (rpcsvc_request_t *req) { @@ -3005,6 +3093,9 @@ rpcsvc_actor_t gd_svc_cli_actors[] = { [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", GLUSTER_CLI_STATEDUMP_VOLUME, glusterd_handle_cli_statedump_volume, NULL, NULL, 0}, [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, NULL, 0}, [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", GLUSTER_CLI_CLRLOCKS_VOLUME, glusterd_handle_cli_clearlocks_volume, NULL, NULL, 0}, +#ifdef HAVE_BD_XLATOR + [GLUSTER_CLI_BD_CREATE] = {"BD_CREATE", GLUSTER_CLI_BD_CREATE, glusterd_handle_cli_bd_create, NULL, NULL, 0}, +#endif }; struct rpcsvc_program gd_svc_cli_prog = { diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c index d0e8882..302440e 100644 --- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c +++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c @@ -227,6 +227,22 @@ glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickin brick_req->name = gf_strdup (name); break; + case GD_OP_NEW_LV: + { + brick_req = GF_CALLOC (1, sizeof (*brick_req), + 
gf_gld_mt_mop_brick_req_t); + if (!brick_req) + goto out; + + brick_req->op = GLUSTERD_BRICK_XLATOR_OP; + brick_req->name = "dummy"; + ret = dict_get_int32 (dict, "newlv-op", (int32_t*)&heal_op); + //if (ret) + // goto out; + ret = dict_set_int32 (dict, "xl-op", GD_OP_NEW_LV/*heal_op*/); + } + break; + default: goto out; break; @@ -1964,6 +1980,7 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr) case GD_OP_STATEDUMP_VOLUME: case GD_OP_CLEARLOCKS_VOLUME: case GD_OP_DEFRAG_BRICK_VOLUME: + case GD_OP_NEW_LV: { dict_t *dict = ctx; ret = dict_get_str (dict, "volname", &volname); @@ -2986,7 +3003,9 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr, ret = glusterd_op_stage_clearlocks_volume (dict, op_errstr); break; - + case GD_OP_NEW_LV: + ret = 0; + break; default: gf_log ("", GF_LOG_ERROR, "Unknown op %d", op); @@ -3082,7 +3101,10 @@ glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr, case GD_OP_CLEARLOCKS_VOLUME: ret = glusterd_op_clearlocks_volume (dict, op_errstr); break; - + case GD_OP_NEW_LV: + /* FIXME */ + ret = 0; + break; default: gf_log ("", GF_LOG_ERROR, "Unknown op %d", op); @@ -3824,6 +3846,63 @@ _select_rxlators_for_full_self_heal (xlator_t *this, } static int +glusterd_bricks_select_new_lv_data (dict_t *dict, char **op_errstr) +{ + int ret = -1; + glusterd_conf_t *priv = NULL; + xlator_t *this = NULL; + glusterd_pending_node_t *pending_node = NULL; + glusterd_volinfo_t *volinfo = NULL; + char *volname; + glusterd_brickinfo_t *brickinfo = NULL; + int brick_index = -1; + + this = THIS; + GF_ASSERT (this); + priv = this->private; + GF_ASSERT (priv); + + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Unable to get volname"); + goto out; + } + ret = glusterd_volinfo_find (volname, &volinfo); + if (ret) + goto out; + + pending_node = GF_CALLOC (1, sizeof (*pending_node), + gf_gld_mt_pending_node_t); + if (!pending_node) { + ret = -1; + goto 
out; + } + + list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) { + brick_index++; + if (uuid_compare (brickinfo->uuid, MY_UUID) || + !glusterd_is_brick_started (brickinfo)) { + continue; + } + pending_node->node = brickinfo; + pending_node->type = GD_NODE_BRICK; + pending_node->index = brick_index; + list_add_tail (&pending_node->list, + &opinfo.pending_bricks); + pending_node = NULL; + } + + + + + ret = 0; + +out: + gf_log (THIS->name, GF_LOG_DEBUG, "Returning ret %d", ret); + return ret; +} + +static int glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr) { int ret = -1; @@ -3900,7 +3979,6 @@ out: } - static int glusterd_bricks_select_rebalance_volume (dict_t *dict, char **op_errstr) { @@ -4233,6 +4311,9 @@ glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr) case GD_OP_DEFRAG_BRICK_VOLUME: ret = glusterd_bricks_select_rebalance_volume (dict, op_errstr); break; + case GD_OP_NEW_LV: + ret = glusterd_bricks_select_new_lv_data (dict, op_errstr); + break; default: break; } @@ -4796,6 +4877,7 @@ glusterd_op_free_ctx (glusterd_op_t op, void *ctx) case GD_OP_STATEDUMP_VOLUME: case GD_OP_CLEARLOCKS_VOLUME: case GD_OP_DEFRAG_BRICK_VOLUME: + case GD_OP_NEW_LV: dict_unref (ctx); break; default: diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h index a774c55..4ec6ba0 100644 --- a/xlators/mgmt/glusterd/src/glusterd.h +++ b/xlators/mgmt/glusterd/src/glusterd.h @@ -81,6 +81,7 @@ typedef enum glusterd_op_ { GD_OP_LIST_VOLUME, GD_OP_CLEARLOCKS_VOLUME, GD_OP_DEFRAG_BRICK_VOLUME, + GD_OP_NEW_LV, GD_OP_MAX, } glusterd_op_t; -- 1.7.7.6