We should skip known failed disks when allocating space for new arrays. This fixes the failure of the 10ddf-fail-spare test case. Signed-off-by: Martin Wilck <mwilck@xxxxxxxx> --- super-ddf.c | 5 ++++- 1 files changed, 4 insertions(+), 1 deletions(-) diff --git a/super-ddf.c b/super-ddf.c index b352a52..23438b7 100644 --- a/super-ddf.c +++ b/super-ddf.c @@ -2390,7 +2390,10 @@ static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl) struct extent *rv; int n = 0; unsigned int i; - + __u16 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state); + if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online) + return NULL; + rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2)); for (i = 0; i < ddf->max_part; i++) { -- 1.7.1 -- To unsubscribe from this list: send the line "unsubscribe linux-raid" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html