On 03/20/2011 03:14 PM, ismail anwer wrote:
I had a power cut, and then the RAID status changed from ok to nosync, even
after letting it sync for two days.
################ dmraid -s
*** Group superset isw_cdiebcjbjb
--> Active Subset
name : isw_cdiebcjbjb_ismail
size : 3907039744
stride : 256
type : raid5_la
status : ok
subsets: 0
devs : 3
spares : 0
################ dmsetup table
isw_cdiebcjbjb_ismail1: 0 3907033088 linear 252:0 2048
isw_cdiebcjbjb_ismail: 0 3907039744 raid45 core 2 131072 nosync
raid5_la 1 256 3 -1 8:16 0 8:32 0 8:48 0
======================================================================
################ dmsetup table
isw_cdiebcjbjb_ismail1: 0 3907033088 linear 252:1 2048
isw_cdiebcjbjb_ismail: 0 3907039744 raid45 core 2 131072 sync raid5_la
1 256 3 -1 8:16 0 8:32 0 8:48 0
################ dmraid -s
*** Group superset isw_cdiebcjbjb
--> Active Subset
name : isw_cdiebcjbjb_ismail
size : 3907039744
stride : 256
type : raid5_la
status : nosync
subsets: 0
devs : 3
spares : 0
I have replaced the failed drive /dev/sdd with a new one, but I can't get
the volume up again.
Please help.
[root@linuxsrv ~ ] # dmraid -ay
ERROR: isw: wrong number of devices in RAID set "isw_cdiebcjbjb_ismail"
[2/3] on /dev/sdb
ERROR: isw: wrong number of devices in RAID set "isw_cdiebcjbjb_ismail"
[2/3] on /dev/sdc
RAID set "isw_cdiebcjbjb_ismail" was not activated
[root@linuxsrv ~ ] # sfdisk -s
/dev/sda: 976762584
/dev/sdb: 976762584
/dev/sdc: 976762584
/dev/sdd: 976762584
[root@linuxsrv ~ ] # dmraid -r
/dev/sdc: isw, "isw_cdiebcjbjb", GROUP, ok, 1953525166 sectors, data@ 0
/dev/sdb: isw, "isw_cdiebcjbjb", GROUP, ok, 1953525166 sectors, data@ 0
[root@linuxsrv ~ ] #
[root@linuxsrv ~ ] # dmraid -R isw_cdiebcjbjb /dev/sdd
ERROR: isw: wrong number of devices in RAID set "isw_cdiebcjbjb_ismail"
[2/3] on /dev/sdb
ERROR: isw: wrong number of devices in RAID set "isw_cdiebcjbjb_ismail"
[2/3] on /dev/sdc
ERROR: isw: only one failed disk supported
metadata fmt update failed
[root@linuxsrv ~ ] # dmraid -s
ERROR: isw: wrong number of devices in RAID set "isw_cdiebcjbjb_ismail"
[2/3] on /dev/sdb
ERROR: isw: wrong number of devices in RAID set "isw_cdiebcjbjb_ismail"
[2/3] on /dev/sdc
*** Group superset isw_cdiebcjbjb
--> *Inconsistent* Subset
name : isw_cdiebcjbjb_ismail
size : 1953519872
stride : 256
type : raid5_la
status : inconsistent
subsets: 0
devs : 2
spares : 0
[root@linuxsrv ~ ] #
Please see if you can help me revive the volume using the old data below.
################ OLD DATA MAY HELP ##############
################ dmesg |egrep "dm-raid45|sda|sdb|sdc|sdd|sde|dm-0|dm-1"
[ 2.487733] sd 2:0:0:0: [sda] 1953525168 512-byte logical blocks:
(1.00 TB/931 GiB)
[ 2.487784] sd 2:0:0:0: [sda] Write Protect is off
[ 2.487787] sd 2:0:0:0: [sda] Mode Sense: 00 3a 00 00
[ 2.487821] sd 2:0:0:0: [sda] Write cache: enabled, read cache:
enabled, doesn't support DPO or FUA
[ 2.488001] sda: sda1 sda2 sda3 < sda5 sda6 sda7 sda8 sda9 sda10 >
[ 2.573557] sd 2:0:0:0: [sda] Attached SCSI disk
[ 3.457091] sd 3:0:0:0: [sdb] 1953525168 512-byte logical blocks:
(1.00 TB/931 GiB)
[ 3.457142] sd 3:0:0:0: [sdb] Write Protect is off
[ 3.457145] sd 3:0:0:0: [sdb] Mode Sense: 00 3a 00 00
[ 3.457170] sd 3:0:0:0: [sdb] Write cache: enabled, read cache:
enabled, doesn't support DPO or FUA
[ 3.457316] sdb: sdb1
[ 3.473808] sdb: p1 size 3907033088 extends beyond EOD, enabling
native capacity
[ 3.474574] sdb: sdb1
[ 3.474760] sdb: p1 size 3907033088 extends beyond EOD, truncated
[ 3.475369] sd 3:0:0:0: [sdb] Attached SCSI disk
[ 4.436551] sd 4:0:0:0: [sdc] 1953525168 512-byte logical blocks:
(1.00 TB/931 GiB)
[ 4.436627] sd 4:0:0:0: [sdc] Write Protect is off
[ 4.436630] sd 4:0:0:0: [sdc] Mode Sense: 00 3a 00 00
[ 4.436655] sd 4:0:0:0: [sdc] Write cache: enabled, read cache:
enabled, doesn't support DPO or FUA
[ 4.436878] sdc: unknown partition table
[ 4.454030] sd 4:0:0:0: [sdc] Attached SCSI disk
[ 5.416028] sd 5:0:0:0: [sdd] 1953525168 512-byte logical blocks:
(1.00 TB/931 GiB)
[ 5.416091] sd 5:0:0:0: [sdd] Write Protect is off
[ 5.416094] sd 5:0:0:0: [sdd] Mode Sense: 00 3a 00 00
[ 5.416117] sd 5:0:0:0: [sdd] Write cache: enabled, read cache:
enabled, doesn't support DPO or FUA
[ 5.416260] sdd: sdd1
[ 5.428718] sdd: p1 size 3907033088 extends beyond EOD, enabling
native capacity
[ 5.429475] sdd: sdd1
[ 5.429677] sdd: p1 size 3907033088 extends beyond EOD, truncated
[ 5.430287] sd 5:0:0:0: [sdd] Attached SCSI disk
[ 6.761841] device-mapper: dm-raid45: initialized v0.2594b
[ 8.280303] EXT4-fs (sda5): mounted filesystem with ordered data
mode. Opts: (null)
[ 8.314802] device-mapper: dm-raid45: /dev/sdb is raid disk 0
[ 8.314806] device-mapper: dm-raid45: /dev/sdc is raid disk 1
[ 8.314809] device-mapper: dm-raid45: /dev/sdd is raid disk 2
[ 8.314813] device-mapper: dm-raid45: 256/256/256 sectors
chunk/io/recovery size, 80 stripes
[ 15.900960] Adding 5119996k swap on /dev/sda7. Priority:-1 extents:1
across:5119996k
[ 17.166011] EXT4-fs (sda5): re-mounted. Opts: errors=remount-ro
[ 17.362881] EXT4-fs (sda6): mounted filesystem with ordered data
mode. Opts: (null)
[ 21.939722] EXT4-fs (sda5): re-mounted. Opts: errors=remount-ro,commit=0
[ 21.964110] EXT4-fs (sda6): re-mounted. Opts: commit=0
[ 34.381673] EXT4-fs (sda8): mounted filesystem with ordered data
mode. Opts: (null)
[ 293.786013] EXT4-fs (sda10): mounted filesystem with ordered data
mode. Opts: (null)
[ 309.988043] EXT4-fs (sda10): mounted filesystem with ordered data
mode. Opts: (null)
[ 374.861620] EXT4-fs (sda8): mounted filesystem with ordered data
mode. Opts: (null)
[ 749.792748] EXT4-fs (sda8): mounted filesystem with ordered data
mode. Opts: (null)
[ 1052.267283] EXT4-fs (sda8): mounted filesystem with ordered data
mode. Opts: (null)
[ 1426.924993] EXT4-fs (sda8): mounted filesystem with ordered data
mode. Opts: (null)
################ dmraid -V
dmraid version: 1.0.0.rc16 (2009.09.16) shared
dmraid library version: 1.0.0.rc16 (2009.09.16)
device-mapper version: 4.17.0
################ dmraid -s
*** Group superset isw_cdiebcjbjb
--> Active Subset
name : isw_cdiebcjbjb_ismail
size : 3907039744
stride : 256
type : raid5_la
status : nosync
subsets: 0
devs : 3
spares : 0
################ dmraid -r
/dev/sdd: isw, "isw_cdiebcjbjb", GROUP, ok, 1953525166 sectors, data@ 0
/dev/sdc: isw, "isw_cdiebcjbjb", GROUP, ok, 1953525166 sectors, data@ 0
/dev/sdb: isw, "isw_cdiebcjbjb", GROUP, ok, 1953525166 sectors, data@ 0
################ dmraid -l
asr : Adaptec HostRAID ASR (0,1,10)
ddf1 : SNIA DDF1 (0,1,4,5,linear)
hpt37x : Highpoint HPT37X (S,0,1,10,01)
hpt45x : Highpoint HPT45X (S,0,1,10)
isw : Intel Software RAID (0,1,5,01)
jmicron : JMicron ATARAID (S,0,1)
lsi : LSI Logic MegaRAID (0,1,10)
nvidia : NVidia RAID (S,0,1,10,5)
pdc : Promise FastTrack (S,0,1,10)
sil : Silicon Image(tm) Medley(tm) (0,1,10)
via : VIA Software RAID (S,0,1,10)
dos : DOS partitions on SW RAIDs
################ dmsetup info
Name: isw_cdiebcjbjb_ismail1
State: ACTIVE
Read Ahead: 256
Tables present: LIVE
Open count: 2
Event number: 0
Major, minor: 252, 0
Number of targets: 1
UUID: DMRAID-isw_cdiebcjbjb_ismail1
Name: isw_cdiebcjbjb_ismail
State: ACTIVE
Read Ahead: 4096
Tables present: LIVE
Open count: 1
Event number: 0
Major, minor: 252, 1
Number of targets: 1
UUID: DMRAID-isw_cdiebcjbjb_ismail
################ dmsetup table
isw_cdiebcjbjb_ismail1: 0 3907033088 linear 252:1 2048
isw_cdiebcjbjb_ismail: 0 3907039744 raid45 core 2 131072 sync raid5_la 1
256 3 -1 8:16 0 8:32 0 8:48 0
################ dmsetup deps
isw_cdiebcjbjb_ismail1: 1 dependencies : (252, 1)
isw_cdiebcjbjb_ismail: 3 dependencies : (8, 48) (8, 32) (8, 16)
################ dmsetup targets
raid45 v1.0.0
mirror v1.12.0
snapshot-merge v1.0.0
snapshot-origin v1.7.0
snapshot v1.9.0
multipath v1.1.1
striped v1.3.0
linear v1.1.0
error v1.0.1
################ dmsetup status
isw_cdiebcjbjb_ismail1: 0 3907033088 linear
isw_cdiebcjbjb_ismail: 0 3907039744 raid45 3 8:16 8:32 8:48 1 AAA
################ dmsetup ls
isw_cdiebcjbjb_ismail1 (252, 0)
isw_cdiebcjbjb_ismail (252, 1)
--
Regards,
Ismail Anwer
System Support Engineer
Mobile : +2 0165501631
Email : alfarahat@xxxxxxxxx
_______________________________________________
Ataraid-list mailing list
Ataraid-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/ataraid-list