rbd -p locks export seco101ira -
2015-10-29 13:13:49.487822 7f5c2cb3b7c0 1 librados: starting msgr at :/0
2015-10-29 13:13:49.487838 7f5c2cb3b7c0 1 librados: starting objecter
2015-10-29 13:13:49.487971 7f5c2cb3b7c0 1 -- :/0 messenger.start
2015-10-29 13:13:49.488027 7f5c2cb3b7c0 1 librados: setting wanted keys
2015-10-29 13:13:49.488031 7f5c2cb3b7c0 1 librados: calling monclient init
2015-10-29 13:13:49.488708 7f5c2cb3b7c0 1 -- :/1025119 --> 10.134.128.41:6789/0 -- auth(proto 0 30 bytes epoch 0) v1 -- ?+0 0x2307900 con 0x2307540
2015-10-29 13:13:49.489236 7f5c2cb33700 1 -- 10.134.128.41:0/1025119 learned my addr 10.134.128.41:0/1025119
2015-10-29 13:13:49.489498 7f5c270db700 10 client.?.objecter ms_handle_connect 0x2307540
2015-10-29 13:13:49.489646 7f5c270db700 10 client.?.objecter resend_mon_ops
2015-10-29 13:13:49.490171 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== mon.0 10.134.128.41:6789/0 1 ==== mon_map v1 ==== 491+0+0 (318324477 0 0) 0x7f5c10000be0 con 0x2307540
2015-10-29 13:13:49.490316 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== mon.0 10.134.128.41:6789/0 2 ==== auth_reply(proto 2 0 (0) Success) v1 ==== 33+0+0 (3748436714 0 0) 0x7f5c10001090 con 0x2307540
2015-10-29 13:13:49.490656 7f5c270db700 1 -- 10.134.128.41:0/1025119 --> 10.134.128.41:6789/0 -- auth(proto 2 32 bytes epoch 0) v1 -- ?+0 0x7f5c1c0018a0 con 0x2307540
2015-10-29 13:13:49.491183 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== mon.0 10.134.128.41:6789/0 3 ==== auth_reply(proto 2 0 (0) Success) v1 ==== 206+0+0 (1658299125 0 0) 0x7f5c10001090 con 0x2307540
2015-10-29 13:13:49.491329 7f5c270db700 1 -- 10.134.128.41:0/1025119 --> 10.134.128.41:6789/0 -- auth(proto 2 165 bytes epoch 0) v1 -- ?+0 0x7f5c1c002250 con 0x2307540
2015-10-29 13:13:49.491871 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== mon.0 10.134.128.41:6789/0 4 ==== auth_reply(proto 2 0 (0) Success) v1 ==== 393+0+0 (1503133956 0 0) 0x7f5c100008c0 con 0x2307540
2015-10-29 13:13:49.491981 7f5c270db700 1 -- 10.134.128.41:0/1025119 --> 10.134.128.41:6789/0 -- mon_subscribe({monmap=0+}) v2 -- ?+0 0x2303c10 con 0x2307540
2015-10-29 13:13:49.492197 7f5c2cb3b7c0 10 client.7368.objecter maybe_request_map subscribing (onetime) to next osd map
2015-10-29 13:13:49.492234 7f5c2cb3b7c0 1 -- 10.134.128.41:0/1025119 --> 10.134.128.41:6789/0 -- mon_subscribe({monmap=6+,osdmap=0}) v2 -- ?+0 0x23048a0 con 0x2307540
2015-10-29 13:13:49.492263 7f5c2cb3b7c0 1 -- 10.134.128.41:0/1025119 --> 10.134.128.41:6789/0 -- mon_subscribe({monmap=6+,osdmap=0}) v2 -- ?+0 0x2304e40 con 0x2307540
2015-10-29 13:13:49.492595 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== mon.0 10.134.128.41:6789/0 5 ==== mon_map v1 ==== 491+0+0 (318324477 0 0) 0x7f5c10001300 con 0x2307540
2015-10-29 13:13:49.492758 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== mon.0 10.134.128.41:6789/0 6 ==== mon_subscribe_ack(300s) v1 ==== 20+0+0 (646930372 0 0) 0x7f5c100015a0 con 0x2307540
2015-10-29 13:13:49.493171 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== mon.0 10.134.128.41:6789/0 7 ==== osd_map(4350..4350 src has 3829..4350) v3 ==== 7562+0+0 (1787729222 0 0) 0x7f5c100008c0 con 0x2307540
2015-10-29 13:13:49.493390 7f5c2cb3b7c0 1 librados: init done
2015-10-29 13:13:49.493431 7f5c2cb3b7c0 10 librados: wait_for_osdmap waiting
2015-10-29 13:13:49.493557 7f5c270db700 3 client.7368.objecter handle_osd_map got epochs [4350,4350] > 0
2015-10-29 13:13:49.493572 7f5c270db700 3 client.7368.objecter handle_osd_map decoding full epoch 4350
2015-10-29 13:13:49.493831 7f5c270db700 20 client.7368.objecter dump_active .. 0 homeless
2015-10-29 13:13:49.493861 7f5c2cb3b7c0 10 librados: wait_for_osdmap done waiting
2015-10-29 13:13:49.493863 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== mon.0 10.134.128.41:6789/0 8 ==== mon_subscribe_ack(300s) v1 ==== 20+0+0 (646930372 0 0) 0x7f5c10003170 con 0x2307540
2015-10-29 13:13:49.493880 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== mon.0 10.134.128.41:6789/0 9 ==== osd_map(4350..4350 src has 3829..4350) v3 ==== 7562+0+0 (1787729222 0 0) 0x7f5c10005230 con 0x2307540
2015-10-29 13:13:49.493889 7f5c270db700 3 client.7368.objecter handle_osd_map ignoring epochs [4350,4350] <= 4350
2015-10-29 13:13:49.493891 7f5c270db700 20 client.7368.objecter dump_active .. 0 homeless
2015-10-29 13:13:49.493898 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== mon.0 10.134.128.41:6789/0 10 ==== mon_subscribe_ack(300s) v1 ==== 20+0+0 (646930372 0 0) 0x7f5c100056d0 con 0x2307540
2015-10-29 13:13:49.493950 7f5c2cb3b7c0 20 librbd::ImageCtx: enabling caching...
2015-10-29 13:13:49.493971 7f5c2cb3b7c0 20 librbd::ImageCtx: Initial cache settings: size=64 num_objects=10 max_dirty=32 target_dirty=16 max_dirty_age=5
2015-10-29 13:13:49.494155 7f5c2cb3b7c0 20 librbd: open_image: ictx = 0x2305530 name = 'seco101ira' id = '' snap_name = ''
2015-10-29 13:13:49.494209 7f5c2cb3b7c0 10 librados: stat oid=seco101ira.rbd nspace=
2015-10-29 13:13:49.494290 7f5c2cb3b7c0 10 client.7368.objecter calc_target pgid 4.a982c550 acting [2,5]
2015-10-29 13:13:49.494364 7f5c2cb3b7c0 20 client.7368.objecter note: not requesting commit
2015-10-29 13:13:49.494377 7f5c2cb3b7c0 10 client.7368.objecter op_submit oid seco101ira.rbd @4 @4 [stat] tid 1 osd.2
2015-10-29 13:13:49.494388 7f5c2cb3b7c0 15 client.7368.objecter send_op 1 to osd.2
2015-10-29 13:13:49.494396 7f5c2cb3b7c0 1 -- 10.134.128.41:0/1025119 --> 10.134.128.43:6803/2741 -- osd_op(client.7368.0:1 seco101ira.rbd [stat] 4.a982c550 ack+read e4350) v4 -- ?+0 0x2308400 con 0x2307e40
2015-10-29 13:13:49.494419 7f5c2cb3b7c0 5 client.7368.objecter 1 unacked, 0 uncommitted
2015-10-29 13:13:49.495869 7f5c270db700 10 client.7368.objecter ms_handle_connect 0x2307e40
2015-10-29 13:13:49.496958 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== osd.2 10.134.128.43:6803/2741 1 ==== osd_op_reply(1 seco101ira.rbd [stat] v0'0 uv1 ondisk = 0) v6 ==== 181+0+16 (1355327314 0 448133945) 0x7f5c04000c10 con 0x2307e40
2015-10-29 13:13:49.496993 7f5c270db700 10 client.7368.objecter in handle_osd_op_reply
2015-10-29 13:13:49.496997 7f5c270db700 7 client.7368.objecter handle_osd_op_reply 1 ondisk v 0'0 uv 1 in 4.a982c550 attempt 0
2015-10-29 13:13:49.497004 7f5c270db700 10 client.7368.objecter op 0 rval 0 len 16
2015-10-29 13:13:49.497007 7f5c270db700 10 client.7368.objecter op 0 handler 0x2306ed0
2015-10-29 13:13:49.497015 7f5c270db700 15 client.7368.objecter handle_osd_op_reply ack
2015-10-29 13:13:49.497018 7f5c270db700 15 client.7368.objecter handle_osd_op_reply completed tid 1
2015-10-29 13:13:49.497024 7f5c270db700 15 client.7368.objecter finish_op 1
2015-10-29 13:13:49.497035 7f5c270db700 5 client.7368.objecter 0 unacked, 0 uncommitted
2015-10-29 13:13:49.497073 7f5c2cb3b7c0 10 librados: Objecter returned from stat r=0
2015-10-29 13:13:49.497096 7f5c2cb3b7c0 20 librbd: detect format of seco101ira : old
2015-10-29 13:13:49.497105 7f5c2cb3b7c0 20 librbd: ictx_refresh 0x2305530
2015-10-29 13:13:49.497121 7f5c2cb3b7c0 10 librados: read oid=seco101ira.rbd nspace=
2015-10-29 13:13:49.497136 7f5c2cb3b7c0 10 client.7368.objecter calc_target pgid 4.a982c550 acting [2,5]
2015-10-29 13:13:49.497140 7f5c2cb3b7c0 20 client.7368.objecter note: not requesting commit
2015-10-29 13:13:49.497143 7f5c2cb3b7c0 10 client.7368.objecter op_submit oid seco101ira.rbd @4 @4 [read 0~4096] tid 2 osd.2
2015-10-29 13:13:49.497148 7f5c2cb3b7c0 15 client.7368.objecter send_op 2 to osd.2
2015-10-29 13:13:49.497152 7f5c2cb3b7c0 1 -- 10.134.128.41:0/1025119 --> 10.134.128.43:6803/2741 -- osd_op(client.7368.0:2 seco101ira.rbd [read 0~4096] 4.a982c550 ack+read e4350) v4 -- ?+0 0x2308f10 con 0x2307e40
2015-10-29 13:13:49.497181 7f5c2cb3b7c0 5 client.7368.objecter 1 unacked, 0 uncommitted
2015-10-29 13:13:49.498184 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== osd.2 10.134.128.43:6803/2741 2 ==== osd_op_reply(2 seco101ira.rbd [read 0~112] v0'0 uv1 ondisk = 0) v6 ==== 181+0+112 (674783017 0 641510945) 0x7f5c04000c10 con 0x2307e40
2015-10-29 13:13:49.498201 7f5c270db700 10 client.7368.objecter in handle_osd_op_reply
2015-10-29 13:13:49.498204 7f5c270db700 7 client.7368.objecter handle_osd_op_reply 2 ondisk v 0'0 uv 1 in 4.a982c550 attempt 0
2015-10-29 13:13:49.498207 7f5c270db700 10 client.7368.objecter op 0 rval 0 len 112
2015-10-29 13:13:49.498210 7f5c270db700 15 client.7368.objecter handle_osd_op_reply ack
2015-10-29 13:13:49.498214 7f5c270db700 15 client.7368.objecter handle_osd_op_reply completed tid 2
2015-10-29 13:13:49.498216 7f5c270db700 15 client.7368.objecter finish_op 2
2015-10-29 13:13:49.498223 7f5c270db700 5 client.7368.objecter 0 unacked, 0 uncommitted
2015-10-29 13:13:49.498237 7f5c2cb3b7c0 10 librados: Objecter returned from read r=0
2015-10-29 13:13:49.498242 7f5c2cb3b7c0 10 librados: Returned length 112 less than original length 4096
2015-10-29 13:13:49.498311 7f5c2cb3b7c0 10 librados: call oid=seco101ira.rbd nspace=
2015-10-29 13:13:49.498325 7f5c2cb3b7c0 10 client.7368.objecter calc_target pgid 4.a982c550 acting [2,5]
2015-10-29 13:13:49.498329 7f5c2cb3b7c0 20 client.7368.objecter note: not requesting commit
2015-10-29 13:13:49.498331 7f5c2cb3b7c0 10 client.7368.objecter op_submit oid seco101ira.rbd @4 @4 [call rbd.snap_list] tid 3 osd.2
2015-10-29 13:13:49.498342 7f5c2cb3b7c0 15 client.7368.objecter send_op 3 to osd.2
2015-10-29 13:13:49.498345 7f5c2cb3b7c0 1 -- 10.134.128.41:0/1025119 --> 10.134.128.43:6803/2741 -- osd_op(client.7368.0:3 seco101ira.rbd [call rbd.snap_list] 4.a982c550 ack+read e4350) v4 -- ?+0 0x23098a0 con 0x2307e40
2015-10-29 13:13:49.498361 7f5c2cb3b7c0 5 client.7368.objecter 1 unacked, 0 uncommitted
2015-10-29 13:13:49.499093 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== osd.2 10.134.128.43:6803/2741 3 ==== osd_op_reply(3 seco101ira.rbd [call] v0'0 uv1 ondisk = 0) v6 ==== 181+0+12 (1759032444 0 0) 0x7f5c04000c10 con 0x2307e40
2015-10-29 13:13:49.499110 7f5c270db700 10 client.7368.objecter in handle_osd_op_reply
2015-10-29 13:13:49.499112 7f5c270db700 7 client.7368.objecter handle_osd_op_reply 3 ondisk v 0'0 uv 1 in 4.a982c550 attempt 0
2015-10-29 13:13:49.499115 7f5c270db700 10 client.7368.objecter op 0 rval 0 len 12
2015-10-29 13:13:49.499117 7f5c270db700 15 client.7368.objecter handle_osd_op_reply ack
2015-10-29 13:13:49.499120 7f5c270db700 15 client.7368.objecter handle_osd_op_reply completed tid 3
2015-10-29 13:13:49.499131 7f5c270db700 15 client.7368.objecter finish_op 3
2015-10-29 13:13:49.499171 7f5c270db700 5 client.7368.objecter 0 unacked, 0 uncommitted
2015-10-29 13:13:49.499186 7f5c2cb3b7c0 10 librados: Objecter returned from call r=0
2015-10-29 13:13:49.499257 7f5c2cb3b7c0 10 librados: call oid=seco101ira.rbd nspace=
2015-10-29 13:13:49.499271 7f5c2cb3b7c0 10 client.7368.objecter calc_target pgid 4.a982c550 acting [2,5]
2015-10-29 13:13:49.499274 7f5c2cb3b7c0 20 client.7368.objecter note: not requesting commit
2015-10-29 13:13:49.499277 7f5c2cb3b7c0 10 client.7368.objecter op_submit oid seco101ira.rbd @4 @4 [call lock.get_info] tid 4 osd.2
2015-10-29 13:13:49.499281 7f5c2cb3b7c0 15 client.7368.objecter send_op 4 to osd.2
2015-10-29 13:13:49.499284 7f5c2cb3b7c0 1 -- 10.134.128.41:0/1025119 --> 10.134.128.43:6803/2741 -- osd_op(client.7368.0:4 seco101ira.rbd [call lock.get_info] 4.a982c550 ack+read e4350) v4 -- ?+0 0x230b330 con 0x2307e40
2015-10-29 13:13:49.499311 7f5c2cb3b7c0 5 client.7368.objecter 1 unacked, 0 uncommitted
2015-10-29 13:13:49.500174 7f5c270db700 1 -- 10.134.128.41:0/1025119 <== osd.2 10.134.128.43:6803/2741 4 ==== osd_op_reply(4 seco101ira.rbd [call] v0'0 uv1 ondisk = 0) v6 ==== 181+0+15 (3120959177 0 2149983739) 0x7f5c04000c10 con 0x2307e40
2015-10-29 13:13:49.500200 7f5c270db700 10 client.7368.objecter in handle_osd_op_reply
2015-10-29 13:13:49.500203 7f5c270db700 7 client.7368.objecter handle_osd_op_reply 4 ondisk v 0'0 uv 1 in 4.a982c550 attempt 0
2015-10-29 13:13:49.500206 7f5c270db700 10 client.7368.objecter op 0 rval 0 len 15
2015-10-29 13:13:49.500207 7f5c270db700 15 client.7368.objecter handle_osd_op_reply ack
2015-10-29 13:13:49.500210 7f5c270db700 15 client.7368.objecter handle_osd_op_reply completed tid 4
2015-10-29 13:13:49.500211 7f5c270db700 15 client.7368.objecter finish_op 4
2015-10-29 13:13:49.500221 7f5c270db700 5 client.7368.objecter 0 unacked, 0 uncommitted
2015-10-29 13:13:49.500236 7f5c2cb3b7c0 10 librados: Objecter returned from call r=0
2015-10-29 13:13:49.500266 7f5c2cb3b7c0 10 librbd::ImageCtx: cache bytes 64 order 22 -> about 10 objects
2015-10-29 13:13:49.500269 7f5c2cb3b7c0 10 librbd::ImageCtx: init_layout stripe_unit 4194304 stripe_count 1 object_size 4194304 prefix rb.0.16cf.238e1f29 format rb.0.16cf.238e1f29.%012llx
2015-10-29 13:13:49.500286 7f5c2cb3b7c0 10 librados: set snap write context: seq = 0 and snaps = []
2015-10-29 13:13:49.500303 7f5c2cb3b7c0 10 librados: set snap read head -> head
2015-10-29 13:13:49.500313 7f5c2cb3b7c0 20 librbd: info 0x2305530
2015-10-29 13:13:49.500321 7f5c2cb3b7c0 20 librbd: ictx_check 0x2305530
2015-10-29 13:13:49.500333 7f5c2cb3b7c0 20 librbd: read_iterate 0x2305530 off = 0 len = 1048576
2015-10-29 13:13:49.500335 7f5c2cb3b7c0 20 librbd: ictx_check 0x2305530
2015-10-29 13:13:49.500348 7f5c2cb3b7c0 20 librbd: aio_read 0x2305530 completion 0x230a0d0 [0,1048576]
2015-10-29 13:13:49.500361 7f5c2cb3b7c0 20 librbd: ictx_check 0x2305530
2015-10-29 13:13:49.500387 7f5c2cb3b7c0 20 librbd: oid rb.0.16cf.238e1f29.000000000000 0~1048576 from [0,1048576]
2015-10-29 13:13:49.500450 7f5c2cb3b7c0 20 librbd::AioCompletion: AioCompletion::finish_adding_requests 0x230a0d0 pending 1
2015-10-29 13:13:54.492329 7f5c24fd6700 10 client.7368.objecter tick
2015-10-29 13:13:59.492510 7f5c24fd6700 10 client.7368.objecter tick

The ticks continue on.

----- Original Message -----
From: "Jason Dillaman" <dillaman@xxxxxxxxxx>
To: "Joe Ryner" <jryner@xxxxxxxx>
Cc: ceph-users@xxxxxxxx
Sent: Thursday, October 29, 2015 12:05:38 PM
Subject: Re: rbd hang

I don't see the read request hitting the wire, so I am thinking your client cannot talk to the primary PG for the 'rb.0.16cf.238e1f29.000000000000' object. Try adding "debug objecter = 20" to your configuration to get more details.
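A quick way to check that reachability directly from the client host is sketched below. This is an illustration only: the pool name 'locks' comes from the export command above, and the address/port to probe should be whatever "ceph osd map" actually reports, not necessarily the osd.2 example taken from the log.

# Map the object that never gets a reply to its PG and primary OSD:
ceph osd map locks rb.0.16cf.238e1f29.000000000000

# Then confirm the client can open a TCP connection to the reported
# primary OSD, e.g. for osd.2 at the address seen earlier in the log:
nc -vz 10.134.128.43 6803

If the reported primary is an OSD the client cannot connect to, that would line up with the read never hitting the wire.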
--
Jason Dillaman

----- Original Message -----
> From: "Joe Ryner" <jryner@xxxxxxxx>
> To: ceph-users@xxxxxxxx
> Sent: Thursday, October 29, 2015 12:22:01 PM
> Subject: rbd hang
>
> Hi,
>
> I am having a strange problem with our development cluster. When I run rbd
> export, it just hangs. I have been running Ceph for a long time and haven't
> encountered this kind of issue. Any ideas as to what is going on?
>
> rbd -p locks export seco101ira -
>
> I am running:
>
> CentOS 6.6 x86_64
> ceph version 0.80.10 (ea6c958c38df1216bf95c927f143d8b13c4a9e70)
>
> I have enabled debugging and get the following when I run the command:
>
> [root@durbium ~]# rbd -p locks export seco101ira -
> 2015-10-29 11:17:08.183597 7fc3334fa7c0 1 librados: starting msgr at :/0
> 2015-10-29 11:17:08.183613 7fc3334fa7c0 1 librados: starting objecter
> 2015-10-29 11:17:08.183739 7fc3334fa7c0 1 -- :/0 messenger.start
> 2015-10-29 11:17:08.183779 7fc3334fa7c0 1 librados: setting wanted keys
> 2015-10-29 11:17:08.183782 7fc3334fa7c0 1 librados: calling monclient init
> 2015-10-29 11:17:08.184365 7fc3334fa7c0 1 -- :/1024687 --> 10.134.128.42:6789/0 -- auth(proto 0 30 bytes epoch 0) v1 -- ?+0 0x15ba900 con 0x15ba540
> 2015-10-29 11:17:08.185006 7fc3334f2700 1 -- 10.134.128.41:0/1024687 learned my addr 10.134.128.41:0/1024687
> 2015-10-29 11:17:08.185995 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== mon.1 10.134.128.42:6789/0 1 ==== mon_map v1 ==== 491+0+0 (318324477 0 0) 0x7fc318000be0 con 0x15ba540
> 2015-10-29 11:17:08.186213 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== mon.1 10.134.128.42:6789/0 2 ==== auth_reply(proto 2 0 (0) Success) v1 ==== 33+0+0 (4093383511 0 0) 0x7fc318001090 con 0x15ba540
> 2015-10-29 11:17:08.186544 7fc32da9a700 1 -- 10.134.128.41:0/1024687 --> 10.134.128.42:6789/0 -- auth(proto 2 32 bytes epoch 0) v1 -- ?+0 0x7fc31c001700 con 0x15ba540
> 2015-10-29 11:17:08.187160 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== mon.1 10.134.128.42:6789/0 3 ==== auth_reply(proto 2 0 (0) Success) v1 ==== 206+0+0 (2382192463 0 0) 0x7fc318001090 con 0x15ba540
> 2015-10-29 11:17:08.187354 7fc32da9a700 1 -- 10.134.128.41:0/1024687 --> 10.134.128.42:6789/0 -- auth(proto 2 165 bytes epoch 0) v1 -- ?+0 0x7fc31c002220 con 0x15ba540
> 2015-10-29 11:17:08.188001 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== mon.1 10.134.128.42:6789/0 4 ==== auth_reply(proto 2 0 (0) Success) v1 ==== 393+0+0 (34117402 0 0) 0x7fc3180008c0 con 0x15ba540
> 2015-10-29 11:17:08.188148 7fc32da9a700 1 -- 10.134.128.41:0/1024687 --> 10.134.128.42:6789/0 -- mon_subscribe({monmap=0+}) v2 -- ?+0 0x15b6b80 con 0x15ba540
> 2015-10-29 11:17:08.188334 7fc3334fa7c0 1 -- 10.134.128.41:0/1024687 --> 10.134.128.42:6789/0 -- mon_subscribe({monmap=6+,osdmap=0}) v2 -- ?+0 0x15b7700 con 0x15ba540
> 2015-10-29 11:17:08.188355 7fc3334fa7c0 1 -- 10.134.128.41:0/1024687 --> 10.134.128.42:6789/0 -- mon_subscribe({monmap=6+,osdmap=0}) v2 -- ?+0 0x15b7ca0 con 0x15ba540
> 2015-10-29 11:17:08.188445 7fc3334fa7c0 1 librados: init done
> 2015-10-29 11:17:08.188463 7fc3334fa7c0 10 librados: wait_for_osdmap waiting
> 2015-10-29 11:17:08.188625 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== mon.1 10.134.128.42:6789/0 5 ==== mon_map v1 ==== 491+0+0 (318324477 0 0) 0x7fc318001300 con 0x15ba540
> 2015-10-29 11:17:08.188795 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== mon.1 10.134.128.42:6789/0 6 ==== mon_subscribe_ack(300s) v1 ==== 20+0+0 (646930372 0 0) 0x7fc3180015a0 con 0x15ba540
> 2015-10-29 11:17:08.189129 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== mon.1 10.134.128.42:6789/0 7 ==== osd_map(4350..4350 src has 3829..4350) v3 ==== 7562+0+0 (1787729222 0 0) 0x7fc3180013b0 con 0x15ba540
> 2015-10-29 11:17:08.189452 7fc3334fa7c0 10 librados: wait_for_osdmap done waiting
> 2015-10-29 11:17:08.189454 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== mon.1 10.134.128.42:6789/0 8 ==== mon_subscribe_ack(300s) v1 ==== 20+0+0 (646930372 0 0) 0x7fc3180008c0 con 0x15ba540
> 2015-10-29 11:17:08.189470 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== mon.1 10.134.128.42:6789/0 9 ==== osd_map(4350..4350 src has 3829..4350) v3 ==== 7562+0+0 (1787729222 0 0) 0x7fc318005290 con 0x15ba540
> 2015-10-29 11:17:08.189485 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== mon.1 10.134.128.42:6789/0 10 ==== mon_subscribe_ack(300s) v1 ==== 20+0+0 (646930372 0 0) 0x7fc3180056d0 con 0x15ba540
> 2015-10-29 11:17:08.189522 7fc3334fa7c0 20 librbd::ImageCtx: enabling caching...
> 2015-10-29 11:17:08.189540 7fc3334fa7c0 20 librbd::ImageCtx: Initial cache settings: size=64 num_objects=10 max_dirty=32 target_dirty=16 max_dirty_age=5
> 2015-10-29 11:17:08.189686 7fc3334fa7c0 20 librbd: open_image: ictx = 0x15b8390 name = 'seco101ira' id = '' snap_name = ''
> 2015-10-29 11:17:08.189730 7fc3334fa7c0 10 librados: stat oid=seco101ira.rbd nspace=
> 2015-10-29 11:17:08.189882 7fc3334fa7c0 1 -- 10.134.128.41:0/1024687 --> 10.134.128.43:6803/2741 -- osd_op(client.7543.0:1 seco101ira.rbd [stat] 4.a982c550 ack+read e4350) v4 -- ?+0 0x15baf60 con 0x15b9e70
> 2015-10-29 11:17:08.192470 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== osd.2 10.134.128.43:6803/2741 1 ==== osd_op_reply(1 seco101ira.rbd [stat] v0'0 uv1 ondisk = 0) v6 ==== 181+0+16 (1355327314 0 448133945) 0x7fc314000c10 con 0x15b9e70
> 2015-10-29 11:17:08.192548 7fc3334fa7c0 10 librados: Objecter returned from stat r=0
> 2015-10-29 11:17:08.192563 7fc3334fa7c0 20 librbd: detect format of seco101ira : old
> 2015-10-29 11:17:08.192600 7fc3334fa7c0 20 librbd: ictx_refresh 0x15b8390
> 2015-10-29 11:17:08.192614 7fc3334fa7c0 10 librados: read oid=seco101ira.rbd nspace=
> 2015-10-29 11:17:08.192631 7fc3334fa7c0 1 -- 10.134.128.41:0/1024687 --> 10.134.128.43:6803/2741 -- osd_op(client.7543.0:2 seco101ira.rbd [read 0~4096] 4.a982c550 ack+read e4350) v4 -- ?+0 0x15bb700 con 0x15b9e70
> 2015-10-29 11:17:08.193957 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== osd.2 10.134.128.43:6803/2741 2 ==== osd_op_reply(2 seco101ira.rbd [read 0~112] v0'0 uv1 ondisk = 0) v6 ==== 181+0+112 (674783017 0 641510945) 0x7fc314000c10 con 0x15b9e70
> 2015-10-29 11:17:08.194018 7fc3334fa7c0 10 librados: Objecter returned from read r=0
> 2015-10-29 11:17:08.194022 7fc3334fa7c0 10 librados: Returned length 112 less than original length 4096
> 2015-10-29 11:17:08.194058 7fc3334fa7c0 10 librados: call oid=seco101ira.rbd nspace=
> 2015-10-29 11:17:08.194074 7fc3334fa7c0 1 -- 10.134.128.41:0/1024687 --> 10.134.128.43:6803/2741 -- osd_op(client.7543.0:3 seco101ira.rbd [call rbd.snap_list] 4.a982c550 ack+read e4350) v4 -- ?+0 0x15bbd40 con 0x15b9e70
> 2015-10-29 11:17:08.194845 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== osd.2 10.134.128.43:6803/2741 3 ==== osd_op_reply(3 seco101ira.rbd [call] v0'0 uv1 ondisk = 0) v6 ==== 181+0+12 (1759032444 0 0) 0x7fc314000c10 con 0x15b9e70
> 2015-10-29 11:17:08.194902 7fc3334fa7c0 10 librados: Objecter returned from call r=0
> 2015-10-29 11:17:08.194944 7fc3334fa7c0 10 librados: call oid=seco101ira.rbd nspace=
> 2015-10-29 11:17:08.194963 7fc3334fa7c0 1 -- 10.134.128.41:0/1024687 --> 10.134.128.43:6803/2741 -- osd_op(client.7543.0:4 seco101ira.rbd [call lock.get_info] 4.a982c550 ack+read e4350) v4 -- ?+0 0x15bd450 con 0x15b9e70
> 2015-10-29 11:17:08.196313 7fc32da9a700 1 -- 10.134.128.41:0/1024687 <== osd.2 10.134.128.43:6803/2741 4 ==== osd_op_reply(4 seco101ira.rbd [call] v0'0 uv1 ondisk = 0) v6 ==== 181+0+15 (3120959177 0 2149983739) 0x7fc314000c10 con 0x15b9e70
> 2015-10-29 11:17:08.196356 7fc3334fa7c0 10 librados: Objecter returned from call r=0
> 2015-10-29 11:17:08.196382 7fc3334fa7c0 10 librbd::ImageCtx: cache bytes 64 order 22 -> about 10 objects
> 2015-10-29 11:17:08.196386 7fc3334fa7c0 10 librbd::ImageCtx: init_layout stripe_unit 4194304 stripe_count 1 object_size 4194304 prefix rb.0.16cf.238e1f29 format rb.0.16cf.238e1f29.%012llx
> 2015-10-29 11:17:08.196403 7fc3334fa7c0 10 librados: set snap write context: seq = 0 and snaps = []
> 2015-10-29 11:17:08.196437 7fc3334fa7c0 10 librados: set snap read head -> head
> 2015-10-29 11:17:08.196446 7fc3334fa7c0 20 librbd: info 0x15b8390
> 2015-10-29 11:17:08.196452 7fc3334fa7c0 20 librbd: ictx_check 0x15b8390
> 2015-10-29 11:17:08.196461 7fc3334fa7c0 20 librbd: read_iterate 0x15b8390 off = 0 len = 1048576
> 2015-10-29 11:17:08.196464 7fc3334fa7c0 20 librbd: ictx_check 0x15b8390
> 2015-10-29 11:17:08.196475 7fc3334fa7c0 20 librbd: aio_read 0x15b8390 completion 0x15bbc80 [0,1048576]
> 2015-10-29 11:17:08.196479 7fc3334fa7c0 20 librbd: ictx_check 0x15b8390
> 2015-10-29 11:17:08.196504 7fc3334fa7c0 20 librbd: oid rb.0.16cf.238e1f29.000000000000 0~1048576 from [0,1048576]
> 2015-10-29 11:17:08.196564 7fc3334fa7c0 20 librbd::AioCompletion: AioCompletion::finish_adding_requests 0x15bbc80 pending 1
>
> Below is my ceph.conf
> ----------------------
>
> [client]
> rbd cache = true
> rbd cache size = 64
> rbd cache max dirty = 32
> rbd cache target dirty = 16
> rbd cache max dirty age = 5.0
>
> debug rbd = 20
> debug rados = 20
> debug ms = 1
> log file = /var/log/ceph/client.$name.$pid.log
>
> [global]
> public network = 10.134.128.0/26
> cluster network = 10.134.128.64/26
> fsid = a0a1c9ea-9a77-41fe-aef1-b87136776ac3
>
> # For version 0.55 and beyond, you must explicitly enable
> # or disable authentication with "auth" entries in [global].
>
> auth cluster required = cephx
> auth service required = cephx
> auth client required = cephx
> osd journal size = 10000
>
> [osd]
> osd recovery max active = 1
>
> osd journal size = 10000
> journal aio = true
>
> # The following assumes ext4 filesystem.
> # osd mkfs options {fs-type} = {mkfs options}   # default for xfs is "-f"
> # osd mount options {fs-type} = {mount options} # default mount option is "rw,noatime"
> # For example, for ext4, the mount option might look like this:
> # osd mkfs options ext4 = user_xattr,rw,noatime
>
> osd mkfs options xfs = -L -f -d su=64k,sw=1 $name
> osd mount options btrfs = rw,noatime
> osd mount options xfs = rw,noatime,nodiratime
>
> filestore xattr use omap = true
> # osd mkfs type = btrfs
> osd mkfs type = xfs
> osd mkfs options btrfs = -L $name
>
> # CAIT -- Manual commands to make and mount file system
>
> # -- Make xfs file system
> # mkfs -t xfs -f -L ceph-X -d su=64k,sw=1 /dev/sdX1
>
> # -- Rescan partition label
> # partprobe /dev/sdX1
>
> # -- Mount ceph file system
> # mount -o rw,noatime,nodiratime /dev/disk/by-label/ceph-X /var/lib/ceph/osd/ceph-X
>
> [mon.durbium]
> host = durbium
> mon addr = 10.134.128.41:6789
>
> [mon.zirconium]
> host = zirconium
> mon addr = 10.134.128.43:6789
>
> [mon.stone]
> host = stone
> mon addr = 10.134.128.42:6789
>
> [osd.0]
> host = durbium
> devs = /dev/vg-osd-0/lv-osd
> osd mkfs type = xfs
> osd journal = /dev/vg-osd-0/lv-journal
>
> [osd.1]
> host = zirconium
> devs = /dev/vg-osd-1/lv-osd
> osd mkfs type = xfs
> osd journal = /dev/vg-osd-1/lv-journal
>
> [osd.2]
> host = zirconium
> devs = /dev/vg-osd-2/lv-osd
> osd mkfs type = xfs
> osd journal = /dev/vg-osd-2/lv-journal
>
> [osd.3]
> host = zirconium
> devs = /dev/vg-osd-3/lv-osd
> osd mkfs type = xfs
> osd journal = /dev/vg-osd-3/lv-journal
>
> [osd.4]
> host = zirconium
> devs = /dev/vg-osd-4/lv-osd
> osd mkfs type = xfs
> osd journal = /dev/vg-osd-4/lv-journal
>
> [osd.5]
> host = stone
> devs = /dev/vg-osd-5/lv-osd
> osd mkfs type = xfs
> osd journal = /dev/vg-osd-5/lv-journal
>
> [osd.6]
> host = stone
> devs = /dev/vg-osd-6/lv-osd
> osd mkfs type = xfs
> osd journal = /dev/vg-osd-6/lv-journal
>
> [osd.7]
> host = stone
> devs = /dev/vg-osd-7/lv-osd
> osd mkfs type = xfs
> osd journal = /dev/vg-osd-7/lv-journal
>
> [osd.8]
> host = stone
> devs = /dev/vg-osd-8/lv-osd
> osd mkfs type = xfs
> osd journal = /dev/vg-osd-8/lv-journal
>
> --
> Joe Ryner
> Center for the Application of Information Technologies (CAIT)

--
Joe Ryner
Center for the Application of Information Technologies (CAIT)
Production Coordinator
P: (309) 298-1804
F: (309) 298-2806

_______________________________________________
ceph-users mailing list
ceph-users@xxxxxxxxxxxxxx
http://lists.ceph.com/listinfo.cgi/ceph-users-ceph.com