The patch titled

     md: remove the working_disks and failed_disks from raid5 state data.

has been added to the -mm tree.  Its filename is

     md-remove-the-working_disks-and-failed_disks-from-raid5-state-data.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt
to find out what to do about this

------------------------------------------------------
Subject: md: remove the working_disks and failed_disks from raid5 state data.
From: NeilBrown <neilb@xxxxxxx>

They are not needed.  conf->failed_disks is the same as mddev->degraded and
conf->working_disks is conf->raid_disks - mddev->degraded.

Signed-off-by: Neil Brown <neilb@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 drivers/md/raid5.c         |   20 ++++++++------------
 include/linux/raid/raid5.h |    2 +-
 2 files changed, 9 insertions(+), 13 deletions(-)

diff -puN drivers/md/raid5.c~md-remove-the-working_disks-and-failed_disks-from-raid5-state-data drivers/md/raid5.c
--- a/drivers/md/raid5.c~md-remove-the-working_disks-and-failed_disks-from-raid5-state-data
+++ a/drivers/md/raid5.c
@@ -698,9 +698,7 @@ static void error(mddev_t *mddev, mdk_rd
         if (!test_bit(Faulty, &rdev->flags)) {
                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
                 if (test_bit(In_sync, &rdev->flags)) {
-                        conf->working_disks--;
                         mddev->degraded++;
-                        conf->failed_disks++;
                         clear_bit(In_sync, &rdev->flags);
                         /*
                          * if recovery was running, make sure it aborts.
@@ -711,7 +709,7 @@ static void error(mddev_t *mddev, mdk_rd
                 printk (KERN_ALERT
                         "raid5: Disk failure on %s, disabling device."
                         " Operation continuing on %d devices\n",
-                        bdevname(rdev->bdev,b), conf->working_disks);
+                        bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
         }
 }
@@ -3074,6 +3072,7 @@ static int run(mddev_t *mddev)
         mdk_rdev_t *rdev;
         struct disk_info *disk;
         struct list_head *tmp;
+        int working_disks = 0;

         if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) {
                 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
@@ -3176,14 +3175,14 @@ static int run(mddev_t *mddev)
                         printk(KERN_INFO "raid5: device %s operational as raid"
                                 " disk %d\n", bdevname(rdev->bdev,b),
                                 raid_disk);
-                        conf->working_disks++;
+                        working_disks++;
                 }
         }

         /*
          * 0 for a fully functional array, 1 or 2 for a degraded array.
*/ - mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks; + mddev->degraded = conf->raid_disks - working_disks; conf->mddev = mddev; conf->chunk_size = mddev->chunk_size; conf->level = mddev->level; @@ -3218,7 +3217,7 @@ static int run(mddev_t *mddev) if (mddev->degraded > conf->max_degraded) { printk(KERN_ERR "raid5: not enough operational devices for %s" " (%d/%d failed)\n", - mdname(mddev), conf->failed_disks, conf->raid_disks); + mdname(mddev), mddev->degraded, conf->raid_disks); goto abort; } @@ -3375,7 +3374,7 @@ static void status (struct seq_file *seq int i; seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); - seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks); + seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); for (i = 0; i < conf->raid_disks; i++) seq_printf (seq, "%s", conf->disks[i].rdev && @@ -3397,8 +3396,8 @@ static void print_raid5_conf (raid5_conf printk("(conf==NULL)\n"); return; } - printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks, - conf->working_disks, conf->failed_disks); + printk(" --- rd:%d wd:%d\n", conf->raid_disks, + conf->raid_disks - conf->mddev->degraded); for (i = 0; i < conf->raid_disks; i++) { char b[BDEVNAME_SIZE]; @@ -3422,8 +3421,6 @@ static int raid5_spare_active(mddev_t *m && !test_bit(Faulty, &tmp->rdev->flags) && !test_bit(In_sync, &tmp->rdev->flags)) { mddev->degraded--; - conf->failed_disks--; - conf->working_disks++; set_bit(In_sync, &tmp->rdev->flags); } } @@ -3593,7 +3590,6 @@ static int raid5_start_reshape(mddev_t * if (raid5_add_disk(mddev, rdev)) { char nm[20]; set_bit(In_sync, &rdev->flags); - conf->working_disks++; added_devices++; rdev->recovery_offset = 0; sprintf(nm, "rd%d", rdev->raid_disk); diff -puN include/linux/raid/raid5.h~md-remove-the-working_disks-and-failed_disks-from-raid5-state-data include/linux/raid/raid5.h --- a/include/linux/raid/raid5.h~md-remove-the-working_disks-and-failed_disks-from-raid5-state-data +++ a/include/linux/raid/raid5.h @@ -214,7 +214,7 @@ struct raid5_private_data { struct disk_info *spare; int chunk_size, level, algorithm; int max_degraded; - int raid_disks, working_disks, failed_disks; + int raid_disks; int max_nr_stripes; /* used during an expand */ _ Patches currently in -mm which might be from neilb@xxxxxxx are ext3-avoid-triggering-ext3_error-on-bad-nfs-file-handle.patch knfsd-fix-stale-file-handle-problem-with-subtree_checking.patch knfsd-knfsd-add-some-missing-newlines-in-printks.patch knfsd-knfsd-remove-an-unused-variable-from-e_show.patch knfsd-knfsd-remove-an-unused-variable-from-auth_unix_lookup.patch knfsd-add-a-callback-for-when-last-rpc-thread-finishes.patch knfsd-add-a-callback-for-when-last-rpc-thread-finishes-tidy.patch knfsd-be-more-selective-in-which-sockets-lockd-listens-on.patch knfsd-remove-nfsd_versbits-as-intermediate-storage-for-desired-versions.patch knfsd-separate-out-some-parts-of-nfsd_svc-which-start-nfs-servers.patch knfsd-separate-out-some-parts-of-nfsd_svc-which-start-nfs-servers-tweaks.patch knfsd-define-new-nfsdfs-file-portlist-contains-list-of-ports.patch knfsd-define-new-nfsdfs-file-portlist-contains-list-of-ports-tidy.patch knfsd-define-new-nfsdfs-file-portlist-contains-list-of-ports-fix.patch knfsd-allow-sockets-to-be-passed-to-nfsd-via-portlist.patch knfsd-use-seq_start_token-instead-of-hardcoded-magic-void1.patch knfsd-have-ext2-reject-file-handles-with-bad-inode-numbers-early.patch 
knfsd-have-ext2-reject-file-handles-with-bad-inode-numbers-early-tidy.patch
knfsd-make-ext3-reject-filehandles-referring-to-invalid-inode-numbers.patch
knfsd-make-ext3-reject-filehandles-referring-to-invalid-inode-numbers-tidy.patch
knfsd-drop-serv-option-to-svc_recv-and-svc_process.patch
knfsd-drop-serv-option-to-svc_recv-and-svc_process-nfs-callback-fix-nfs-callback-fix.patch
knfsd-check-return-value-of-lockd_up-in-write_ports.patch
knfsd-move-makesock-failed-warning-into-make_socks.patch
knfsd-correctly-handle-error-condition-from-lockd_up.patch
knfsd-move-tempsock-aging-to-a-timer.patch
knfsd-move-tempsock-aging-to-a-timer-tidy.patch
knfsd-convert-sk_inuse-to-atomic_t.patch
knfsd-use-new-lock-for-svc_sock-deferred-list.patch
knfsd-convert-sk_reserved-to-atomic_t.patch
knfsd-test-and-set-sk_busy-atomically.patch
knfsd-split-svc_serv-into-pools.patch
knfsd-add-svc_get.patch
knfsd-add-svc_set_num_threads.patch
knfsd-use-svc_set_num_threads-to-manage-threads-in-knfsd.patch
knfsd-make-rpc-threads-pools-numa-aware.patch
knfsd-allow-admin-to-set-nthreads-per-node.patch
md-the-scheduled-removal-of-the-start_array-ioctl-for-md.patch
md-fix-a-comment-that-is-wrong-in-raid5h.patch
md-factor-out-part-of-raid1d-into-a-separate-function.patch
md-factor-out-part-of-raid10d-into-a-separate-function.patch
md-replace-magic-numbers-in-sb_dirty-with-well-defined-bit-flags.patch
md-remove-the-working_disks-and-failed_disks-from-raid5-state-data.patch
md-remove-working_disks-from-raid10-state.patch
md-remove-working_disks-from-raid1-state-data.patch
md-improve-locking-around-error-handling.patch
md-dm-reduce-stack-usage-with-stacked-block-devices.patch

-
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
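The identity the patch relies on (failed_disks equals degraded, and
working_disks equals raid_disks minus degraded) can be illustrated outside the
kernel with a minimal standalone C sketch.  The struct and helper names below
are simplified stand-ins invented for illustration only; they are not the
kernel's raid5_private_data or mddev fields.

/*
 * Illustrative sketch: with only raid_disks and degraded stored,
 * the working and failed counts can always be derived on demand,
 * which is why the cached counters could be removed.
 */
#include <stdio.h>

struct array_state {
        int raid_disks;         /* total member disks in the array */
        int degraded;           /* disks currently missing or faulty */
};

static int working_disks(const struct array_state *s)
{
        return s->raid_disks - s->degraded;
}

static int failed_disks(const struct array_state *s)
{
        return s->degraded;
}

int main(void)
{
        struct array_state s = { .raid_disks = 6, .degraded = 1 };

        /* prints a line in the same rd:/wd: style as print_raid5_conf() */
        printf(" --- rd:%d wd:%d fd:%d\n",
               s.raid_disks, working_disks(&s), failed_disks(&s));
        return 0;
}

Built with any C compiler, this prints " --- rd:6 wd:5 fd:1", deriving the
working and failed counts from the two stored values in the same way the
patched raid5 code now does instead of updating separate counters.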