Patch 2 of 8

This patch adds a flag called busy_initializing. If there are multiple
controllers in a server AND the HP agents are running, the agents may try
to poll a card that is still initializing if the driver is removed and
then added again. Please consider this for inclusion.

Signed-off-by: Don Brace <dab@xxxxxx>
Signed-off-by: Mike Miller <mike.miller@xxxxxx>

 cciss.c |    8 ++++++++
 cciss.h |    1 +
 2 files changed, 9 insertions(+)

--------------------------------------------------------------------------------

diff -burNp lx2613-p001/drivers/block/cciss.c lx2613/drivers/block/cciss.c
--- lx2613-p001/drivers/block/cciss.c	2005-09-07 13:27:05.390086000 -0500
+++ lx2613/drivers/block/cciss.c	2005-09-07 14:27:13.573559656 -0500
@@ -468,6 +468,9 @@ static int cciss_open(struct inode *inod
 	printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
 #endif /* CCISS_DEBUG */

+	if (host->busy_initializing)
+		return -EBUSY;
+
 	/*
 	 * Root is allowed to open raw volume zero even if it's not configured
 	 * so array config can still work. Root is also allowed to open any
@@ -2743,6 +2746,9 @@ static int __devinit cciss_init_one(stru
 	i = alloc_cciss_hba();
 	if(i < 0)
 		return (-1);
+
+	hba[i]->busy_initializing = 1;
+
 	if (cciss_pci_init(hba[i], pdev) != 0)
 		goto clean1;

@@ -2865,6 +2871,7 @@ static int __devinit cciss_init_one(stru
 		add_disk(disk);
 	}

+	hba[i]->busy_initializing = 0;
 	return(1);

 clean4:
@@ -2885,6 +2892,7 @@ clean2:
 clean1:
+	hba[i]->busy_initializing = 0;
 	release_io_mem(hba[i]);
 	free_hba(i);
 	return(-1);
 }

diff -burNp lx2613-p001/drivers/block/cciss.h lx2613/drivers/block/cciss.h
--- lx2613-p001/drivers/block/cciss.h	2005-08-28 18:41:01.000000000 -0500
+++ lx2613/drivers/block/cciss.h	2005-09-07 14:24:55.174599496 -0500
@@ -83,6 +83,7 @@ struct ctlr_info
 	int			nr_allocs;
 	int			nr_frees;
 	int			busy_configuring;
+	int			busy_initializing;

 	/* This element holds the zero based queue number of the last
 	 * queue to be started.  It is used for fairness.
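
For reviewers unfamiliar with the pattern, here is a minimal user-space
sketch of the same guard. All names in it (fake_ctlr, ctlr_open,
ctlr_init_one) are hypothetical illustration only, not cciss code:

#include <errno.h>
#include <stdio.h>

struct fake_ctlr {
	int busy_initializing;	/* nonzero while the board is being set up */
};

/* Stands in for cciss_open(): refuse access while init is in flight. */
static int ctlr_open(struct fake_ctlr *h)
{
	if (h->busy_initializing)
		return -EBUSY;
	return 0;
}

/* Stands in for cciss_init_one(): the flag covers the whole setup, so
 * a poll arriving mid-init is bounced instead of touching the card. */
static void ctlr_init_one(struct fake_ctlr *h)
{
	h->busy_initializing = 1;
	/* ... slow controller/firmware initialization runs here ... */
	h->busy_initializing = 0;	/* only now may opens succeed */
}

int main(void)
{
	struct fake_ctlr h = { .busy_initializing = 1 };

	/* Simulates an HP agent polling a re-added, still-initializing card. */
	printf("open during init: %d\n", ctlr_open(&h));	/* -EBUSY */

	ctlr_init_one(&h);
	printf("open after init:  %d\n", ctlr_open(&h));	/* 0 */
	return 0;
}

In the driver itself the flag is raised before cciss_pci_init() and
cleared only after the last add_disk() (and on the clean1 error path),
so cciss_open() returns -EBUSY for the entire window in which a
re-added controller is still coming up.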