Re: Ubuntu Patches [9/16]

This patch includes:
 - a fix for the "Incorrect status displayed when disk is missing" issue,
 - a fix for "Missing supported RAID configuration in the '-l' option" (Bugzilla bug 479395),
 - a fix for "Wrong disk layout in raid01" (no Bugzilla bug).
--- a/1.0.0.rc15/lib/format/ataraid/isw.c
+++ b/1.0.0.rc15/lib/format/ataraid/isw.c
@@ -4,8 +4,11 @@
  * Copyright (C) 2004-2008  Heinz Mauelshagen, Red Hat GmbH.
  *                          All rights reserved.
  *
- * Copyright (C) 2007,2008  Intel Corporation. All rights reserved.
+ * Copyright (C) 2007,2009  Intel Corporation. All rights reserved.
  * November, 2007 - additions for Create, Delete, Rebuild & Raid 10.
+ * March, 2008 - additions for hot-spare check
+ * August, 2008 - support for Activation, Rebuild checks
+ * January, 2009 - additions for Activation, Rebuild check
  *
  * See file LICENSE at the top of this source tree for license information.
  */
@@ -162,7 +165,7 @@ name(struct lib_context *lc, struct raid
 
 			while (i--) {
 				if (disk == isw->disk + i) {
-					id = i / 2;
+					id = i % 2;
 					goto ok;
 				}
 			}
@@ -479,7 +482,7 @@ isw_read_extended(struct lib_context *lc
 		/* Read extended metadata to offset ISW_DISK_BLOCK_SIZE */
 		if (blocks > 1 &&
 		    !read_file(lc, handler, di->path,
-			       (void *) isw_tmp + ISW_DISK_BLOCK_SIZE,
+			       (void *) (((uint8_t*)isw_tmp) + ISW_DISK_BLOCK_SIZE),
 			       *size - ISW_DISK_BLOCK_SIZE, *isw_sboffset)) {
 			dbg_free(isw_tmp);
 			isw_tmp = NULL;
@@ -708,6 +711,7 @@ super_created(struct raid_set *super, vo
 	super->stride = ((struct isw_dev *) private)->vol.map.blocks_per_strip;
 }
 
+
 /*
  * rs_group contains the top-level group RAID set (type: t_group) on entry
  * and shall be returned on success (or NULL on error).
@@ -1112,6 +1116,15 @@ isw_metadata_handler(struct lib_context 
 
 		break;		/* case GET_REBUILD_DRIVE_NO */
 
+	case ALLOW_ACTIVATE: /* same as ALLOW_REBUILD */
+	case ALLOW_REBUILD:
+		/* Do not allow activate or rebuild if the BBM log is non-empty */
+		isw = META (rd, isw);
+		ret = !isw->bbm_log_size; /* Is log empty */
+		if (!ret)
+			   log_err(lc, "BBM entries detected!");
+		break; /* case ALLOW_REBUILD */
+
 	default:
 		LOG_ERR(lc, 0, "%u not yet supported", command);
 
@@ -1144,16 +1157,25 @@ check_rd(struct lib_context *lc, struct 
 	 struct raid_dev *rd, void *context)
 {
 	struct isw_dev *dev = rd->private.ptr;
-
-	/* FIXME: more status checks ? */
-	if (dev->status) {
+    if (dev == NULL)
+        if (rd->type == t_spare) return 1;
+        else {
+            LOG_ERR(lc, 0, "No information about %s device on %s "
+                    "in RAID set \"%s\"",
+                    handler, rd->di->path, rs->name);
+            return 0;
+        }
+    /* if the disk is ready to read and write, return 1 */
+    if ((dev->status & ISW_DEV_READ_COALESCING)
+           && (dev->status & ISW_DEV_WRITE_COALESCING)) {
+            return 1;
+    }
+    else {
 		LOG_ERR(lc, 0, "%s device for volume \"%s\" broken on %s "
 			"in RAID set \"%s\"",
 			handler, dev->volume, rd->di->path, rs->name);
-
+            return 0;
 	}
-
-	return 1;
 }
 
 static int
@@ -1622,6 +1644,7 @@ _isw_create_first_volume(struct lib_cont
 	isw->attributes = MPB_ATTRIB_CHECKSUM_VERIFY;
 	isw->num_raid_devs = (rs->type == ISW_T_SPARE) ? 0 : 1;
 	isw->family_num = isw->orig_family_num = _checksum(isw) + time(NULL);
+	isw->bbm_log_size = 0;
 	isw->check_sum = 0;
 	isw->check_sum = _checksum(isw);
 	return isw;
@@ -1719,6 +1742,7 @@ _isw_create_second_volume(struct lib_con
 	isw->generation_num++;
 	isw->attributes = MPB_ATTRIB_CHECKSUM_VERIFY;
 	isw->num_raid_devs++;
+	isw->bbm_log_size = 0;
 	isw->check_sum = 0;
 	isw->check_sum = _checksum(isw);
 	return isw;
@@ -1941,6 +1965,8 @@ _isw_log(struct lib_context *lc, struct 
 	DP("error_log_pos: %u", isw, isw->error_log_pos);
 	DP("cache_size: %u", isw, isw->cache_size);
 	DP("orig_family_num: %u", isw, isw->orig_family_num);
+	DP("power_cycle_count: %u", isw, isw->power_cycle_count);
+	DP("bbm_log_size: %u", isw, isw->bbm_log_size);
 
 	for (i = 0; i < ISW_FILLERS; i++) {
 		if (isw->filler[i])
@@ -2296,7 +2322,7 @@ isw_delete(struct lib_context *lc, struc
 static struct dmraid_format isw_format = {
 	.name = HANDLER,
 	.descr = "Intel Software RAID",
-	.caps = "0,1,01",
+	.caps = "0,1,5,01",
 	.format = FMT_RAID,
 	.read = isw_read,
 	.write = isw_write,
--- a/1.0.0.rc15/lib/format/format.c
+++ b/1.0.0.rc15/lib/format/format.c
@@ -260,6 +260,7 @@ write_metadata(struct lib_context *lc, c
 	return 1;
 }
 
+
 /*
  * Check devices in a RAID set.
  *
@@ -277,7 +278,7 @@ _check_raid_set(struct lib_context *lc, 
 				void *context),
 		void *f_check_context, const char *handler)
 {
-	unsigned int devs;
+	unsigned int dev_tot_qan;
 	uint64_t sectors;
 	struct raid_dev *rd;
 
@@ -285,36 +286,73 @@ _check_raid_set(struct lib_context *lc, 
 		return;
 
 	sectors = total_sectors(lc, rs);
-	rs->total_devs = devs = count_devs(lc, rs, ct_dev);
+	rs->total_devs = dev_tot_qan = count_devs(lc, rs, ct_dev);
 	list_for_each_entry(rd, &rs->devs, devs) {
-		unsigned int devices = f_devices(rd, f_devices_context);
+		unsigned int dev_found_qan = f_devices(rd, f_devices_context);
 		/* FIXME: error if the metadatas aren't all the same? */
-		rs->found_devs = devices;
+		rs->found_devs = dev_found_qan;
 
 		log_dbg(lc, "checking %s device \"%s\"", handler, rd->di->path);
-		if (T_SPARE(rd) && rs->type == t_raid1 &&	/* FIXME: rs->type check ? */
-		    rd->sectors != sectors) {
-			rd->status = s_inconsistent;
-			log_err(lc, "%s: size mismatch in set \"%s\", spare "
-				"\"%s\"", handler, rs->name, rd->di->path);
-			continue;
-		}
-
-		if (devs != devices &&
-		    f_check && !f_check(lc, rs, rd, f_check_context)) {
-			rd->status = s_broken;
-			log_err(lc, "%s: wrong # of devices in RAID "
-				"set \"%s\" [%u/%u] on %s",
-				handler, rs->name, devs, devices, rd->di->path);
-		} else
-			rd->status = s_ok;
+        /* if the number of found disks equals the number expected, the status is OK */
+        if ((dev_tot_qan == dev_found_qan) && f_check
+                && f_check(lc, rs, rd, f_check_context)) {
+            rd->status = s_ok;
+        }
+        else {
+            if(dev_tot_qan != dev_found_qan) {
+                log_err(lc, "%s: wrong # of devices in RAID "
+				    "set \"%s\" [%u/%u] on %s",
+				    handler, rs->name, dev_tot_qan, dev_found_qan, rd->di->path);
+            }
+            /* if the number of disks is incorrect, the status depends on the raid type: */
+            switch(rs->type) {
+                case t_linear:
+                case t_raid0: /* raid 0 - always broken */
+                    rd->status = s_broken;
+                    break;
+                case t_raid1: /* raid 1 - if at least 1 disk is left - inconsistent */
+                    if(dev_tot_qan >= 1)
+                        rd->status = s_inconsistent;
+                    else if (T_SPARE(rd) && rd->sectors != sectors) {
+			            rd->status = s_inconsistent;
+			            log_err(lc, "%s: size mismatch in set \"%s\", spare "
+				            "\"%s\"", handler, rs->name, rd->di->path);
+                    }
+                    else rd->status = s_broken;
+                    break;
+                case t_raid4:
+                case t_raid5_ls:
+                case t_raid5_rs:
+                case t_raid5_la:
+                case t_raid5_ra: /* raid 4/5 - if 1 disk missing - inconsistent */
+                    if((dev_tot_qan == (dev_found_qan-1) && dev_tot_qan > 1
+                             && f_check && f_check(lc, rs, rd, f_check_context))
+                            || (dev_tot_qan > dev_found_qan))
+                        rd->status = s_inconsistent;
+                    else rd->status = s_broken;
+                    break;
+                case t_raid6: /* raid 6 - if up to 2 disks missing - inconsistent */
+                    if((dev_tot_qan >= (dev_found_qan-2)
+                             && f_check && f_check(lc, rs, rd, f_check_context))
+                            || (dev_tot_qan > dev_found_qan))
+                        rd->status = s_inconsistent;
+                    else rd->status = s_broken;
+                    break;
+
+                case t_spare: /* spare - always broken */
+                    rd->status = s_broken;
+                    break;
+                default: /* other - undef */
+                    rd->status = s_undef;
+        }
+    }
 	}
 }
 
 /*
  * Update RAID set state based on operational subsets/devices.
  *
- * In case of a RAID set hierachy, check availability of subsets
+ * In case of a RAID set hierarchy, check availability of subsets
  * and set superset to broken in case *all* subsets are broken.
  * If at least one is still available, set to inconsistent.
  *
@@ -323,12 +361,19 @@ _check_raid_set(struct lib_context *lc, 
  */
 static void
 _set_rs_status(struct lib_context *lc, struct raid_set *rs,
-	       unsigned int i, unsigned int operational)
+	       unsigned int i, unsigned int operational,
+           unsigned int inconsist, unsigned int subsets_raid01)
 {
-	if (operational == i)
+	if (subsets_raid01 == 1) {
+        rs->status = s_broken;
+        return;
+    }
+    if (operational == i)
 		rs->status = s_ok;
 	else if (operational)
 		rs->status = s_inconsistent;
+    else if (inconsist)
+		rs->status = s_inconsistent;
 	else
 		rs->status = s_broken;
 
@@ -338,26 +383,30 @@ _set_rs_status(struct lib_context *lc, s
 static int
 set_rs_status(struct lib_context *lc, struct raid_set *rs)
 {
-	unsigned int i = 0, operational = 0;
+	unsigned int i = 0, operational = 0, subsets_raid01 = 0, inconsist = 0;
 	struct raid_set *r;
 	struct raid_dev *rd;
 
 	/* Set status of subsets. */
 	list_for_each_entry(r, &rs->sets, list) {
 		/* Check subsets to set status of superset. */
-		i++;
+        if((rs->type == t_raid0) && (r->type == t_raid1))
+           subsets_raid01++; /* Count subsets for raid 01 */
+		i++; /* Count subsets*/
 		if (S_OK(r->status) || S_INCONSISTENT(r->status))
-			operational++;
+			operational++; /* Count operational subsets*/
 	}
 
 	/* Check status of devices... */
 	list_for_each_entry(rd, &rs->devs, devs) {
-		i++;
+		i++; /* Count disks*/
 		if (S_OK(rd->status))
-			operational++;
+			operational++; /* Count disks in "ok" raid*/
+        if (S_INCONSISTENT(rd->status))
+            inconsist++; /* Count disks in degraded raid*/
 	}
 
-	_set_rs_status(lc, rs, i, operational);
+	_set_rs_status(lc, rs, i, operational, inconsist, subsets_raid01);
 	return S_BROKEN(rs->status) ? 0 : 1;
 }
 
--- a/1.0.0.rc15/include/dmraid/format.h
+++ b/1.0.0.rc15/include/dmraid/format.h
@@ -75,6 +75,8 @@ enum handler_commands {
 	GET_REBUILD_DRIVE,
 	GET_REBUILD_DRIVE_NO,
 	CHECK_HOT_SPARE,
+	ALLOW_ACTIVATE,
+	ALLOW_REBUILD,
 	/* ... */
 };
 
--- a/1.0.0.rc15/lib/format/ataraid/isw.h
+++ b/1.0.0.rc15/lib/format/ataraid/isw.h
@@ -188,6 +188,8 @@ struct isw_dev {
 
 struct isw {
 	int8_t sig[MAX_SIGNATURE_LENGTH];	/* 0x0 - 0x1F */
+	uint32_t bbm_log_size;
+	uint32_t power_cycle_count;
 	uint32_t check_sum;	/* 0x20 - 0x23  MPB Checksum */
 	uint32_t mpb_size;	/* 0x24 - 0x27 Size of MPB */
 	uint32_t family_num;	/* 0x28 - 0x2B Checksum from first time this config was written */



