Alasdair,

I've included two patches made against 2.6.19-rc6 of Linus's git tree. The
first patch changes dm and dm-multipath so that a pointer to the dm mapped
device can be passed to the dm hardware handler's create interface. The
second patch contains the changes to the dm-emc hardware handler itself.
Taken together, these patches differ from my previous submission of this
content only in using blk_rq_map_kern() for the I/Os issued by the emc
handler, instead of pointing the rq->buffer field directly at the I/O buffer.

I've also sent this content to Mike Christie, who has indicated that he has
already integrated the dm-emc changes into the scsi subsystem's hardware
handler format.

Let me know if there is anything I can do to help with this work.

Thanks,
Ed

<<0001-Exported-dm_put-to-be-used-by-parse_hw_handler.txt>>
<<0002-Added-logical-unit-follow-over-capability-and.txt>>
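For anyone reading only the cover note: the blk_rq_map_kern() approach
mentioned above has roughly the following shape. This is a minimal sketch
only, not the exact code in the second patch; the function name, buffer,
timeout value and error returns here are illustrative.

        /*
         * Sketch: build and issue a REQ_TYPE_BLOCK_PC request backed by a
         * kernel buffer, letting the block layer map (and bounce, if
         * necessary) the buffer instead of pointing rq->buffer at it.
         */
        static int issue_pc_cmd(struct request_queue *q, void *buffer,
                                unsigned len, rq_end_io_fn *endio, void *data)
        {
                struct request *rq;

                rq = blk_get_request(q, WRITE, __GFP_WAIT);
                if (!rq)
                        return -ENOMEM;

                if (blk_rq_map_kern(q, rq, buffer, len, __GFP_WAIT)) {
                        blk_put_request(rq);
                        return -ENOMEM;
                }

                rq->cmd_type = REQ_TYPE_BLOCK_PC;
                rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
                rq->timeout = 60 * HZ;
                rq->end_io_data = data;

                /* queue at the head of the request queue, complete via endio */
                blk_execute_rq_nowait(q, NULL, rq, 1, endio);
                return 0;
        }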
Subject: [PATCH] Exported dm_put() to be used by parse_hw_handler() of dm-mpath.

Changed parse_hw_handler() and the hw_handler interface to pass a pointer to
the hardware handler's mapped device to enhance event logging.

Signed-off-by: Edward Goggin <egoggin@xxxxxxxxxxxxxxxxxxxx>

---

 drivers/md/dm-hw-handler.h |    2 +-
 drivers/md/dm-mpath.c      |   10 +++++++++-
 drivers/md/dm.c            |    1 +
 3 files changed, 11 insertions(+), 2 deletions(-)

applies-to: 8bb04a9fe67f844e84691e53829813ca65b981b4
7e5d4b23929dd889b34c0d9c9ccbc62e970c5863
diff --git a/drivers/md/dm-hw-handler.h b/drivers/md/dm-hw-handler.h
index 15f5629..53be8aa 100644
--- a/drivers/md/dm-hw-handler.h
+++ b/drivers/md/dm-hw-handler.h
@@ -28,7 +28,7 @@ struct hw_handler_type {
         struct module *module;
 
         int (*create) (struct hw_handler *handler, unsigned int argc,
-                       char **argv);
+                       char **argv, struct mapped_device *md);
         void (*destroy) (struct hw_handler *hwh);
 
         void (*pg_init) (struct hw_handler *hwh, unsigned bypassed,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d754e0b..be87a4f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -627,6 +627,7 @@ static int parse_hw_handler(struct arg_s
         struct hw_handler_type *hwht;
         unsigned hw_argc;
         struct dm_target *ti = m->ti;
+        struct mapped_device *md;
 
         static struct param _params[] = {
                 {0, 1024, "invalid number of hardware handler args"},
@@ -645,7 +646,14 @@ static int parse_hw_handler(struct arg_s
                 return -EINVAL;
         }
 
-        r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
+        /*
+         * No need to hold a reference on the mapped device here
+         * since one is already held for the duration of the
+         * mapped device open.
+         */
+        md = dm_table_get_md(ti->table);
+        r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv, md);
+        dm_put(md);
         if (r) {
                 dm_put_hw_handler(hwht);
                 ti->error = "hardware handler constructor failed";
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fc4f743..163a7a3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1196,6 +1196,7 @@ void dm_put(struct mapped_device *md)
                 free_dev(md);
         }
 }
+EXPORT_SYMBOL_GPL(dm_put);
 
 /*
  * Process the deferred bios
---
0.99.9i
Subject: [PATCH] Added logical unit follow-over capability and improved the event logging of dm-emc.c.

Signed-off-by: Edward Goggin <egoggin@xxxxxxxxxxxxxxxxxxxx>

---

 drivers/md/dm-emc.c |  606 +++++++++++++++++++++++++++++++++++++--------------
 1 files changed, 436 insertions(+), 170 deletions(-)

applies-to: 319d05e77d198aa5d9cd81ac20da516a11d450f4
421522d4789c1db42f2156b7216c3fc120cd8239
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 2b2d45d..1989fc9 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -7,234 +7,469 @@
  * Multipath support for EMC CLARiiON AX/CX-series hardware.
  */
 
+/* Code borrowed from dm-lsi-rdac by Mike Christie */
+
 #include "dm.h"
 #include "dm-hw-handler.h"
 #include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_cmnd.h>
 
-#define DM_MSG_PREFIX "multipath emc"
+#define DM_MSG_PREFIX           "multipath emc"
+#define TRESPASS_PAGE           0x22
+#define BUFFER_SIZE             0x80
+#define EMC_HANDLER_TIMEOUT     (60 * HZ)
+#define UNBOUND_LU              -1
 
+/*
+ * Four variations of the CLARiiON trespass MODE_SELECT page.
+ */
+unsigned char long_trespass_and_hr_pg[] = {
+        0, 0, 0, 0,
+        TRESPASS_PAGE,        /* Page code */
+        0x09,                 /* Page length - 2 */
+        0x01,                 /* Trespass code + Honor reservation bit */
+        0xff, 0xff,           /* Trespass target */
+        0, 0, 0, 0, 0, 0      /* Reserved bytes / unknown */
+};
+unsigned char long_trespass_pg[] = {
+        0, 0, 0, 0,
+        TRESPASS_PAGE,        /* Page code */
+        0x09,                 /* Page length - 2 */
+        0x81,                 /* Trespass code + Honor reservation bit */
+        0xff, 0xff,           /* Trespass target */
+        0, 0, 0, 0, 0, 0      /* Reserved bytes / unknown */
+};
+unsigned char short_trespass_and_hr_pg[] = {
+        0, 0, 0, 0,
+        TRESPASS_PAGE,        /* Page code */
+        0x02,                 /* Page length - 2 */
+        0x01,                 /* Trespass code + Honor reservation bit */
+        0xff,                 /* Trespass target */
+};
+unsigned char short_trespass_pg[] = {
+        0, 0, 0, 0,
+        TRESPASS_PAGE,        /* Page code */
+        0x02,                 /* Page length - 2 */
+        0x81,                 /* Trespass code + Honor reservation bit */
+        0xff,                 /* Trespass target */
+};
+
+/*
+ * EMC hardware handler context structure containing CLARiiON LU specific
+ * information for a particular dm multipath mapped device.
+ */
 struct emc_handler {
         spinlock_t lock;
-
-        /* Whether we should send the short trespass command (FC-series)
-         * or the long version (default for AX/CX CLARiiON arrays). */
+        struct mapped_device *md;
+        struct hw_handler *hwh;
+        struct path *path;
+        struct work_struct wkq;
+        /* Use short trespass command (FC-series) or the long version
+         * (default for AX/CX CLARiiON arrays). */
         unsigned short_trespass;
-        /* Whether or not to honor SCSI reservations when initiating a
-         * switch-over. Default: Don't. */
+        /* Whether or not (default) to honor SCSI reservations when
+         * initiating a switch-over. */
         unsigned hr;
-
+        /* I/O buffer for both MODE_SELECT and INQUIRY commands. */
+        char buffer[BUFFER_SIZE];
+        /* SCSI sense buffer for commands -- assumes serial issuance
+         * and completion sequence of all commands for same multipath.
+         */
         unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+        /* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */
+        int default_sp;
+        /* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */
+        int current_sp;
+        /*
+         * flag when set (reset) differentiates get_sp_info after (before)
+         * pg_init
+         */
+        int pg_init_sent;
 };
 
-#define TRESPASS_PAGE 0x22
-#define EMC_FAILOVER_TIMEOUT (60 * HZ)
+static int send_cmd(struct emc_handler *h, struct path *path, int cmd,
+                    rq_end_io_fn *endio);
+static rq_end_io_fn sp_info_endio, pg_init_endio;
+static int parse_sp_info_reply(struct request *req, struct emc_handler *h,
+                               struct path *path, int *default_sp,
+                               int *current_sp, int *new_current_sp);
+static int parse_pg_init_reply(struct request *req, struct emc_handler *h,
+                               struct path *path);
 
-/* Code borrowed from dm-lsi-rdac by Mike Christie */
+struct workqueue_struct *kemchd;
 
-static inline void free_bio(struct bio *bio)
-{
-        __free_page(bio->bi_io_vec[0].bv_page);
-        bio_put(bio);
+/*
+ * Parse EVPD 0xC0 INQUIRY cmd reply.
+ */
+static int parse_sp_info_reply(struct request *req, struct emc_handler *h,
+                               struct path *path, int *default_sp,
+                               int *current_sp, int *new_current_sp)
+{
+        int err = 0;
+
+        if (req->errors == 0) {
+                /* check for in-progress ucode upgrade (NDU) */
+                if (h->buffer[48] != 0) {
+                        DMWARN("Detected in-progress ucode upgrade NDU "
+                               "operation while finding current active "
+                               "SP for mapped device %s using path %s.",
+                               dm_device_name(h->md), path->dev->name);
+                        err = MP_BYPASS_PG;
+                } else {
+                        *default_sp = h->buffer[5];
+
+                        if (h->buffer[4] == 2)
+                                /* SP for path (in h->buffer[8]) is current */
+                                *current_sp = h->buffer[8];
+                        else {
+                                if (h->buffer[4] == 1)
+                                        /* SP for this path is NOT current */
+                                        if (h->buffer[8] == 0)
+                                                *current_sp = 1;
+                                        else
+                                                *current_sp = 0;
+                                else
+                                        /* unbound LU or LUNZ */
+                                        *current_sp = UNBOUND_LU;
+                        }
+                        *new_current_sp = h->buffer[8];
+                }
+        } else {
+                struct scsi_sense_hdr sshdr;
+
+                err = MP_FAIL_PATH;
+
+                if (req->sense_len && scsi_normalize_sense(req->sense,
+                                                           req->sense_len,
+                                                           &sshdr))
+                        DMERR("Found valid sense data 0x%2x, 0x%2x, 0x%2x "
+                              "while finding current active SP for "
+                              "mapped device %s using path %s.",
+                              sshdr.sense_key, sshdr.asc, sshdr.ascq,
+                              dm_device_name(h->md), path->dev->name);
+                else
+                        DMERR("Error 0x%x finding current active SP for "
+                              "mapped device %s using path %s.", req->errors,
+                              dm_device_name(h->md), path->dev->name);
+        }
+
+        return (err);
 }
 
-static int emc_endio(struct bio *bio, unsigned int bytes_done, int error)
+/*
+ * Parse MODE_SELECT cmd reply.
+ */
+static int parse_pg_init_reply(struct request *req, struct emc_handler *h,
+                               struct path *path)
 {
-        struct path *path = bio->bi_private;
+        struct scsi_sense_hdr sshdr;
+        int err = 0;
 
-        if (bio->bi_size)
-                return 1;
+        if (req->sense_len && scsi_normalize_sense(req->sense,
+                                                   req->sense_len, &sshdr)) {
 
-        /* We also need to look at the sense keys here whether or not to
-         * switch to the next PG etc.
-         *
-         * For now simple logic: either it works or it doesn't.
-         */
-        if (error)
-                dm_pg_init_complete(path, MP_FAIL_PATH);
-        else
-                dm_pg_init_complete(path, 0);
+                DMERR("Found valid sense data 0x%2x, 0x%2x, 0x%2x while "
+                      "sending CLARiiON trespass command for mapped "
+                      "device %s using path %s.",
+                      sshdr.sense_key, sshdr.asc, sshdr.ascq,
+                      dm_device_name(h->md), path->dev->name);
+
+                if ((sshdr.sense_key == 0x05) &&
+                    (sshdr.asc == 0x04) &&
+                    (sshdr.ascq == 0x00)) {
+                        /*
+                         * Array based copy in progress -- do not send
+                         * pg_init or copy will be aborted mid-stream.
+                         */
+                        DMWARN("Array Based Copy in progress while "
+                               "sending CLARiiON trespass command for "
+                               "mapped device %s using path %s.",
+                               dm_device_name(h->md), path->dev->name);
+                        err = MP_BYPASS_PG;
+                } else if ((sshdr.sense_key == 0x02) &&
+                           (sshdr.asc == 0x04) &&
+                           (sshdr.ascq == 0x03)) {
+                        /*
+                         * LUN Not Ready - Manual Intervention Required
+                         * indicates in-progress ucode upgrade (NDU).
+                         */
+                        DMWARN("Detected in-progress ucode upgrade NDU "
+                               "operation while sending CLARiiON trespass "
+                               "command for mapped device %s using path %s.",
+                               dm_device_name(h->md), path->dev->name);
+                        err = MP_BYPASS_PG;
+                } else
+                        err = MP_FAIL_PATH;
+        } else if (req->errors) {
+                DMERR("Error 0x%x while sending CLARiiON trespass command "
+                      "for mapped device %s using path %s.", req->errors,
+                      dm_device_name(h->md), path->dev->name);
+                err = MP_FAIL_PATH;
+        }
 
-        /* request is freed in block layer */
-        free_bio(bio);
+        /* release ref on block request */
+        __blk_put_request(req->q, req);
 
-        return 0;
+        return (err);
 }
 
-static struct bio *get_failover_bio(struct path *path, unsigned data_size)
+/*
+ * Completion handler for EVPD page 0xC0 INQUIRY cmd.
+ */
+static void sp_info_endio(struct request *req, int uptodate)
 {
-        struct bio *bio;
-        struct page *page;
+        struct path *path = req->end_io_data;
+        struct emc_handler *h = (struct emc_handler *)path->hwhcontext;
+        int default_sp, current_sp, new_current_sp;
+        unsigned long flags;
+        int err_flags;
 
-        bio = bio_alloc(GFP_ATOMIC, 1);
-        if (!bio) {
-                DMERR("get_failover_bio: bio_alloc() failed.");
-                return NULL;
-        }
+        if ((err_flags = parse_sp_info_reply(req, h, path, &default_sp,
+                                             &current_sp, &new_current_sp))) {
+                dm_pg_init_complete(path, err_flags);
 
-        bio->bi_rw |= (1 << BIO_RW);
-        bio->bi_bdev = path->dev->bdev;
-        bio->bi_sector = 0;
-        bio->bi_private = path;
-        bio->bi_end_io = emc_endio;
-
-        page = alloc_page(GFP_ATOMIC);
-        if (!page) {
-                DMERR("get_failover_bio: alloc_page() failed.");
-                bio_put(bio);
-                return NULL;
+                /* release ref on block request */
+                __blk_put_request(req->q, req);
+
+                return;
         }
 
-        if (bio_add_page(bio, page, data_size, 0) != data_size) {
-                DMERR("get_failover_bio: alloc_page() failed.");
-                __free_page(page);
-                bio_put(bio);
-                return NULL;
+        /* release ref on block request */
+        __blk_put_request(req->q, req);
+
+        spin_lock_irqsave(&h->lock, flags);
+        if (h->pg_init_sent) {
+                h->default_sp = default_sp;
+                h->current_sp = current_sp;
+                h->pg_init_sent = 0;
+                spin_unlock_irqrestore(&h->lock, flags);
+
+                /* done now */
+                dm_pg_init_complete(path, 0);
+        } else {
+                spin_unlock_irqrestore(&h->lock, flags);
+                /*
+                 * Do not issue the actual pg_init request if either (1)
+                 * we do not know the identity of the current SP or (2)
+                 * the prospective new SP is already current.
+                 */
+                if ((current_sp != UNBOUND_LU) &&
+                    (new_current_sp == current_sp)) {
+
+                        spin_lock_irqsave(&h->lock, flags);
+                        if (h->default_sp == UNBOUND_LU) {
+                                h->default_sp = default_sp;
+                                h->current_sp = current_sp;
+                        }
+                        spin_unlock_irqrestore(&h->lock, flags);
+
+                        /* yet, it's as good as doing it */
+                        dm_pg_init_complete(path, 0);
+
+                        DMINFO("Ignoring path group switch-over command for "
+                               "CLARiiON SP%s since mapped device %s is "
+                               "already initialized for path %s.",
+                               current_sp ? "B" : "A", dm_device_name(h->md),
+                               path->dev->name);
+                } else {
+                        /* send path initialization request */
+                        DMINFO("Issuing CLARiiON trespass command to "
+                               "activate SP%s for mapped device %s using "
+                               "path %s.", new_current_sp ? "B" : "A",
+                               dm_device_name(h->md), path->dev->name);
+
+                        h->path = path;        /* kemchd will use this path */
+                        queue_work(kemchd, &h->wkq);
+                }
         }
 
-        return bio;
+        return;
+}
+
+/*
+ * Completion handler for MODE_SELECT cmd.
+ */
+static void pg_init_endio(struct request *req, int uptodate)
+{
+        struct path *path = req->end_io_data;
+        struct emc_handler *h = (struct emc_handler *)path->hwhcontext;
+        int err_flags;
+
+        if ((err_flags = parse_pg_init_reply(req, h, path)))
+                dm_pg_init_complete(path, err_flags);
+
+        /* release ref on block request */
+        __blk_put_request(req->q, req);
+
+        /* send sp_info request */
+        h->pg_init_sent = 1;
+        queue_work(kemchd, &h->wkq);
+
+        return;
 }
 
-static struct request *get_failover_req(struct emc_handler *h,
-                                        struct bio *bio, struct path *path)
+/*
+ * Get block request for REQ_BLOCK_PC command issued to path. Currently
+ * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
+ *
+ * Uses data and sense buffers in hardware handler context structure and
+ * assumes serial servicing of commands, both issuance and completion.
+ */
+static struct request *get_req(struct path *path, int opcode)
 {
+        struct emc_handler *h = (struct emc_handler *)path->hwhcontext;
+        struct request_queue *q = bdev_get_queue(path->dev->bdev);
         struct request *rq;
-        struct block_device *bdev = bio->bi_bdev;
-        struct request_queue *q = bdev_get_queue(bdev);
+        void *buffer;
+        int len = 0;
 
-        /* FIXME: Figure out why it fails with GFP_ATOMIC. */
-        rq = blk_get_request(q, WRITE, __GFP_WAIT);
+        rq = blk_get_request(q, (opcode == MODE_SELECT) ? WRITE : READ,
+                             __GFP_WAIT);
         if (!rq) {
-                DMERR("get_failover_req: blk_get_request failed");
+                DMERR("dm-emc: get_req: blk_get_request failed");
                 return NULL;
         }
 
-        rq->bio = rq->biotail = bio;
-        blk_rq_bio_prep(q, rq, bio);
-
-        rq->rq_disk = bdev->bd_contains->bd_disk;
+        memset(&rq->cmd, 0, BLK_MAX_CDB);
+        rq->cmd[0] = opcode;
+        rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
 
-        /* bio backed don't set data */
-        rq->buffer = rq->data = NULL;
-        /* rq data_len used for pc cmd's request_bufflen */
-        rq->data_len = bio->bi_size;
+        switch (opcode) {
+        case MODE_SELECT:
+                rq->cmd_flags |= REQ_RW;
+                rq->cmd[1] = 0x10;
+                len = h->short_trespass ? sizeof(short_trespass_and_hr_pg) :
+                      sizeof(long_trespass_and_hr_pg);
+                buffer = h->short_trespass ?
+                        h->hr ? short_trespass_and_hr_pg
+                              : short_trespass_pg
+                        :
+                        h->hr ? long_trespass_and_hr_pg
+                              : long_trespass_pg;
+                /*
+                 * Can't DMA from kernel BSS -- must copy selected trespass
+                 * command mode page contents to context buffer which is
+                 * allocated by kmalloc.
+                 */
+                BUG_ON((len > BUFFER_SIZE));
+                memcpy(h->buffer, buffer, len);
+                break;
+        case INQUIRY:
+                rq->cmd[1] = 0x1;
+                rq->cmd[2] = 0xC0;
+                len = BUFFER_SIZE;
+                memset(h->buffer, 0, BUFFER_SIZE);
+                break;
+        default:
+                BUG_ON(1);
+                break;
+        }
+        rq->cmd[4] = len;
+
+        if (blk_rq_map_kern(q, rq, h->buffer, len, __GFP_WAIT)) {
+                DMERR("dm-emc: get_req: blk_rq_map_kern failed");
+                blk_put_request(rq);
+                return NULL;
+        }
 
         rq->sense = h->sense;
         memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
         rq->sense_len = 0;
 
-        memset(&rq->cmd, 0, BLK_MAX_CDB);
-
-        rq->timeout = EMC_FAILOVER_TIMEOUT;
         rq->cmd_type = REQ_TYPE_BLOCK_PC;
         rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+        rq->timeout = EMC_HANDLER_TIMEOUT;
+        rq->retries = 0;
 
         return rq;
 }
 
-static struct request *emc_trespass_get(struct emc_handler *h,
-                                        struct path *path)
+/*
+ * Send cmd and setup asynchronous i/o completion handler.
+ */
+static int send_cmd(struct emc_handler *h, struct path *path, int cmd,
+                    rq_end_io_fn *endio)
 {
-        struct bio *bio;
+        struct request_queue *q = bdev_get_queue(path->dev->bdev);
         struct request *rq;
-        unsigned char *page22;
-        unsigned char long_trespass_pg[] = {
-                0, 0, 0, 0,
-                TRESPASS_PAGE,        /* Page code */
-                0x09,                 /* Page length - 2 */
-                h->hr ? 0x01 : 0x81,  /* Trespass code + Honor reservation bit */
-                0xff, 0xff,           /* Trespass target */
-                0, 0, 0, 0, 0, 0      /* Reserved bytes / unknown */
-        };
-        unsigned char short_trespass_pg[] = {
-                0, 0, 0, 0,
-                TRESPASS_PAGE,        /* Page code */
-                0x02,                 /* Page length - 2 */
-                h->hr ? 0x01 : 0x81,  /* Trespass code + Honor reservation bit */
-                0xff,                 /* Trespass target */
-        };
-        unsigned data_size = h->short_trespass ? sizeof(short_trespass_pg) :
-                sizeof(long_trespass_pg);
-
-        /* get bio backing */
-        if (data_size > PAGE_SIZE)
-                /* this should never happen */
-                return NULL;
-
-        bio = get_failover_bio(path, data_size);
-        if (!bio) {
-                DMERR("emc_trespass_get: no bio");
-                return NULL;
-        }
-
-        page22 = (unsigned char *)bio_data(bio);
-        memset(page22, 0, data_size);
-
-        memcpy(page22, h->short_trespass ?
-                short_trespass_pg : long_trespass_pg, data_size);
+        /* get and initialize block request */
+        rq = get_req(path, cmd);
+        if (!rq)
+                return MP_FAIL_PATH;
+
+        /* issue the cmd asynchronously (at head of q) */
+        rq->end_io_data = path;
+        blk_execute_rq_nowait(q, NULL, rq, 1, endio);
+        return 0;
+}
 
-        /* get request for block layer packet command */
-        rq = get_failover_req(h, bio, path);
-        if (!rq) {
-                DMERR("emc_trespass_get: no rq");
-                free_bio(bio);
-                return NULL;
+/*
+ * Work queue service routine used to issue i/o without already holding
+ * a request queue lock.
+ */
+static void service_wkq(void *data)
+{
+        struct emc_handler *h = (struct emc_handler *)data;
+        struct path *path = h->path;
+        int err_flags;
+
+        if (h->pg_init_sent) {
+                if ((err_flags = send_cmd(h, path, INQUIRY,
+                                          sp_info_endio))) {
+                        h->pg_init_sent = 0;
+                        dm_pg_init_complete(path, err_flags);
+                }
+        }
+        else {
+                /* send pg_init request */
+                if ((err_flags = send_cmd(h, path, MODE_SELECT,
+                                          pg_init_endio)))
+                        dm_pg_init_complete(path, err_flags);
         }
-
-        /* Prepare the command. */
-        rq->cmd[0] = MODE_SELECT;
-        rq->cmd[1] = 0x10;
-        rq->cmd[4] = data_size;
-        rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
-
-        return rq;
 }
 
+/* initialize path group command */
 static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed,
                         struct path *path)
 {
-        struct request *rq;
-        struct request_queue *q = bdev_get_queue(path->dev->bdev);
+        struct emc_handler *h = (struct emc_handler *)hwh->context;
+        int err_flags;
 
-        /*
-         * We can either blindly init the pg (then look at the sense),
-         * or we can send some commands to get the state here (then
-         * possibly send the fo cmnd), or we can also have the
-         * initial state passed into us and then get an update here.
-         */
-        if (!q) {
-                DMINFO("emc_pg_init: no queue");
-                goto fail_path;
-        }
+        path->hwhcontext = h;        /* needed by endio handlers */
 
-        /* FIXME: The request should be pre-allocated. */
-        rq = emc_trespass_get(hwh->context, path);
-        if (!rq) {
-                DMERR("emc_pg_init: no rq");
-                goto fail_path;
-        }
-
-        DMINFO("emc_pg_init: sending switch-over command");
-        elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
+        if ((err_flags = send_cmd(h, path, INQUIRY, sp_info_endio)))
+                dm_pg_init_complete(path, err_flags);
 
         return;
-
-fail_path:
-        dm_pg_init_complete(path, MP_FAIL_PATH);
 }
 
-static struct emc_handler *alloc_emc_handler(void)
+static struct emc_handler *alloc_emc_handler(unsigned int short_trespass,
+                                             unsigned hr)
 {
-        struct emc_handler *h = kmalloc(sizeof(*h), GFP_KERNEL);
+        struct emc_handler *h = kzalloc(sizeof(*h), GFP_KERNEL);
 
         if (h) {
-                memset(h, 0, sizeof(*h));
                 spin_lock_init(&h->lock);
+
+                INIT_WORK(&h->wkq, service_wkq, h);
+
+                if ((h->short_trespass = short_trespass))
+                        DMINFO("Short trespass command to be sent.");
+                else
+                        DMINFO("Long trespass command to be sent (default).");
+                if ((h->hr = hr))
+                        DMINFO("Honor reservation bit will be set.");
+                else
+                        DMINFO("Honor reservation bit is not set (default).");
+
+                h->default_sp = UNBOUND_LU;
+                h->current_sp = UNBOUND_LU;
         }
 
         return h;
 }
 
-static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
+static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv,
+                      struct mapped_device *md)
 {
         struct emc_handler *h;
         unsigned hr, short_trespass;
@@ -244,37 +479,31 @@ static int emc_create(struct hw_handler
                 hr = 0;
                 short_trespass = 0;
         } else if (argc != 2) {
-                DMWARN("incorrect number of arguments");
+                DMWARN("Incorrect number (0x%x) of arguments. "
+                       "Should be 2.", argc);
                 return -EINVAL;
         } else {
                 if ((sscanf(argv[0], "%u", &short_trespass) != 1)
                     || (short_trespass > 1)) {
-                        DMWARN("invalid trespass mode selected");
+                        DMWARN("Invalid trespass mode (0x%x) selected.",
+                               short_trespass);
                         return -EINVAL;
                 }
 
                 if ((sscanf(argv[1], "%u", &hr) != 1)
                     || (hr > 1)) {
-                        DMWARN("invalid honor reservation flag selected");
+                        DMWARN("Invalid honor reservation flag (0x%x).", hr);
                         return -EINVAL;
                 }
         }
 
-        h = alloc_emc_handler();
+        h = alloc_emc_handler(short_trespass, hr);
         if (!h)
                 return -ENOMEM;
 
         hwh->context = h;
-
-        if ((h->short_trespass = short_trespass))
-                DMWARN("short trespass command will be send");
-        else
-                DMWARN("long trespass command will be send");
-
-        if ((h->hr = hr))
-                DMWARN("honor reservation bit will be set");
-        else
-                DMWARN("honor reservation bit will not be set (default)");
+        h->hwh = hwh;
+        h->md = md;
 
         return 0;
 }
@@ -325,6 +554,31 @@ static unsigned emc_error(struct hw_hand
         return dm_scsi_err_handler(hwh, bio);
 }
 
+static int emc_status(struct hw_handler *hwh, status_type_t type,
+                      char *result, unsigned int maxlen)
+{
+        struct emc_handler *h = (struct emc_handler *)hwh->context;
+        unsigned long flags;
+
+        int sz = 0;
+
+        spin_lock_irqsave(&h->lock, flags);
+
+        if (type == STATUSTYPE_INFO)
+                DMEMIT("2 %d %d ", h->default_sp, h->current_sp);
+        else {
+                if (h->short_trespass || h->hr)
+                        DMEMIT("3 %s %u %u ", hwh->type->name,
+                               h->short_trespass, h->hr);
+                else
+                        DMEMIT("1 %s ", hwh->type->name);
+        }
+
+        spin_unlock_irqrestore(&h->lock, flags);
+
+        return sz;
+}
+
 static struct hw_handler_type emc_hwh = {
         .name = "emc",
         .module = THIS_MODULE,
@@ -332,6 +586,7 @@ static struct hw_handler_type emc_hwh =
         .destroy = emc_destroy,
         .pg_init = emc_pg_init,
         .error = emc_error,
+        .status = emc_status,
 };
 
 static int __init dm_emc_init(void)
@@ -339,19 +594,30 @@ static int __init dm_emc_init(void)
         int r = dm_register_hw_handler(&emc_hwh);
 
         if (r < 0)
-                DMERR("register failed %d", r);
+                DMERR("Register failed %d.", r);
+        else {
+                kemchd = create_singlethread_workqueue("kemchd");
+                if (!kemchd) {
+                        DMERR("Failed to create workqueue kemchd.");
+                        dm_unregister_hw_handler(&emc_hwh);
+                        return -ENOMEM;
+                }
+        }
 
-        DMINFO("version 0.0.3 loaded");
+        DMINFO("Version 0.0.4 loaded.");
 
         return r;
 }
 
 static void __exit dm_emc_exit(void)
 {
-        int r = dm_unregister_hw_handler(&emc_hwh);
+        int r;
+
+        destroy_workqueue(kemchd);
+        r = dm_unregister_hw_handler(&emc_hwh);
 
         if (r < 0)
-                DMERR("unregister failed %d", r);
+                DMERR("Unregister failed %d.", r);
 }
 
 module_init(dm_emc_init);
---
0.99.9i