Re: Balancing cluster with large disks - 10TB HDD

$ sudo ceph osd df tree
ID  CLASS    WEIGHT    REWEIGHT SIZE  USE    AVAIL  %USE  VAR  PGS TYPE NAME
 -8          639.98883        -  639T   327T   312T 51.24 1.00   - root default
-10          111.73999        -  111T 58509G 55915G 51.13 1.00   -     host bison
 78 hdd_fast   0.90900  1.00000  930G  1123M   929G  0.12 0.00   0         osd.78
 79 hdd_fast   0.81799  1.00000  837G  1123M   836G  0.13 0.00   0         osd.79
 20      hdd   9.09499  0.95000 9313G  4980G  4333G 53.47 1.04 204         osd.20
 28      hdd   9.09499  1.00000 9313G  4612G  4700G 49.53 0.97 200         osd.28
 29      hdd   9.09499  1.00000 9313G  4848G  4465G 52.05 1.02 211         osd.29
 33      hdd   9.09499  1.00000 9313G  4759G  4553G 51.10 1.00 207         osd.33
 34      hdd   9.09499  1.00000 9313G  4613G  4699G 49.54 0.97 195         osd.34
 35      hdd   9.09499  0.89250 9313G  4954G  4359G 53.19 1.04 206         osd.35
 36      hdd   9.09499  1.00000 9313G  4724G  4588G 50.73 0.99 200         osd.36
 37      hdd   9.09499  1.00000 9313G  5013G  4300G 53.83 1.05 214         osd.37
 38      hdd   9.09499  0.92110 9313G  4962G  4350G 53.28 1.04 206         osd.38
 39      hdd   9.09499  1.00000 9313G  4960G  4353G 53.26 1.04 214         osd.39
 40      hdd   9.09499  1.00000 9313G  5022G  4291G 53.92 1.05 216         osd.40
 41      hdd   9.09499  0.88235 9313G  5037G  4276G 54.09 1.06 203         osd.41
  7      ssd   0.87299  1.00000  893G 18906M   875G  2.07 0.04 124         osd.7
 -7          102.74084        -  102T 54402G 50805G 51.71 1.01   -     host bonnie
  0      hdd   7.27699  0.87642 7451G  4191G  3259G 56.25 1.10 175         osd.0
  1      hdd   7.27699  0.86200 7451G  3837G  3614G 51.49 1.01 163         osd.1
  2      hdd   7.27699  0.74664 7451G  3920G  3531G 52.61 1.03 169         osd.2
 11      hdd   7.27699  0.77840 7451G  3983G  3467G 53.46 1.04 169         osd.11
 13      hdd   9.09499  0.76595 9313G  4894G  4419G 52.55 1.03 201         osd.13
 14      hdd   9.09499  1.00000 9313G  4350G  4963G 46.71 0.91 189         osd.14
 16      hdd   9.09499  0.92635 9313G  4879G  4434G 52.39 1.02 204         osd.16
 18      hdd   9.09499  0.67932 9313G  4634G  4678G 49.76 0.97 190         osd.18
 22      hdd   9.09499  0.93053 9313G  5085G  4228G 54.60 1.07 218         osd.22
 31      hdd   9.09499  0.88536 9313G  5152G  4160G 55.33 1.08 221         osd.31
 42      hdd   9.09499  0.84232 9313G  4796G  4516G 51.51 1.01 199         osd.42
 43      hdd   9.09499  0.87662 9313G  4656G  4657G 50.00 0.98 191         osd.43
  6      ssd   0.87299  1.00000  894G 20643M   874G  2.25 0.04 134         osd.6
 -6          102.74100        -  102T 53627G 51580G 50.97 0.99   -     host capone
  3      hdd   7.27699  0.84938 7451G  4028G  3422G 54.07 1.06 171         osd.3
  4      hdd   7.27699  0.83890 7451G  3909G  3542G 52.46 1.02 167         osd.4
  5      hdd   7.27699  1.00000 7451G  3389G  4061G 45.49 0.89 151         osd.5
  9      hdd   7.27699  1.00000 7451G  3710G  3740G 49.80 0.97 161         osd.9
 15      hdd   9.09499  1.00000 9313G  4952G  4360G 53.18 1.04 206         osd.15
 17      hdd   9.09499  0.95000 9313G  4865G  4448G 52.24 1.02 202         osd.17
 23      hdd   9.09499  1.00000 9313G  4984G  4329G 53.52 1.04 223         osd.23
 24      hdd   9.09499  1.00000 9313G  4847G  4466G 52.05 1.02 202         osd.24
 25      hdd   9.09499  0.89929 9313G  4909G  4404G 52.71 1.03 205         osd.25
 30      hdd   9.09499  0.92787 9313G  4740G  4573G 50.90 0.99 202         osd.30
 74      hdd   9.09499  0.93146 9313G  4709G  4603G 50.57 0.99 199         osd.74
 75      hdd   9.09499  1.00000 9313G  4559G  4753G 48.96 0.96 194         osd.75
  8      ssd   0.87299  1.00000  893G 19593M   874G  2.14 0.04 129         osd.8
-16          102.74100        -  102T 53985G 51222G 51.31 1.00   -     host elefant
 19      hdd   7.27699  1.00000 7451G  3665G  3786G 49.19 0.96 152         osd.19
 21      hdd   7.27699  0.89539 7451G  4102G  3349G 55.05 1.07 169         osd.21
 64      hdd   7.27699  0.89275 7451G  3956G  3494G 53.10 1.04 171         osd.64
 65      hdd   7.27699  0.92513 7451G  3976G  3475G 53.36 1.04 171         osd.65
 66      hdd   9.09499  1.00000 9313G  4674G  4638G 50.20 0.98 199         osd.66
 67      hdd   9.09499  1.00000 9313G  4737G  4575G 50.87 0.99 201         osd.67
 68      hdd   9.09499  0.89973 9313G  4946G  4366G 53.11 1.04 211         osd.68
 69      hdd   9.09499  1.00000 9313G  4648G  4665G 49.91 0.97 204         osd.69
 70      hdd   9.09499  0.89526 9313G  4907G  4405G 52.69 1.03 209         osd.70
 71      hdd   9.09499  0.84923 9313G  4690G  4622G 50.37 0.98 198         osd.71
 72      hdd   9.09499  0.87547 9313G  4976G  4336G 53.43 1.04 211         osd.72
 73      hdd   9.09499  1.00000 9313G  4683G  4630G 50.29 0.98 200         osd.73
 10      ssd   0.87299  1.00000  893G 19158M   875G  2.09 0.04 126         osd.10
-14          110.01300        -  110T 58498G 54157G 51.93 1.01   -     host flodhest
 27      hdd   9.09499  1.00000 9313G  4602G  4710G 49.42 0.96 199         osd.27
 32      hdd   9.09499  0.92557 9313G  5028G  4285G 53.99 1.05 215         osd.32
 54      hdd   9.09499  0.90724 9313G  4897G  4415G 52.59 1.03 203         osd.54
 55      hdd   9.09499  1.00000 9313G  4867G  4446G 52.26 1.02 198         osd.55
 56      hdd   9.09499  1.00000 9313G  4827G  4485G 51.84 1.01 202         osd.56
 57      hdd   9.09499  0.93675 9313G  4783G  4530G 51.36 1.00 204         osd.57
 58      hdd   9.09499  0.93192 9313G  4599G  4713G 49.39 0.96 194         osd.58
 59      hdd   9.09499  1.00000 9313G  5029G  4284G 54.00 1.05 221         osd.59
 60      hdd   9.09499  1.00000 9313G  5010G  4303G 53.79 1.05 210         osd.60
 61      hdd   9.09499  1.00000 9313G  4773G  4539G 51.26 1.00 205         osd.61
 62      hdd   9.09499  1.00000 9313G  5004G  4308G 53.74 1.05 209         osd.62
 63      hdd   9.09499  0.93686 9313G  5055G  4258G 54.28 1.06 207         osd.63
 77      ssd   0.87299  1.00000  893G 18472M   875G  2.02 0.04 127         osd.77
-12          110.01300        -  110T 56762G 55893G 50.39 0.98   -     host yak
 12      hdd   9.09499  1.00000 9313G  4916G  4396G 52.79 1.03 211         osd.12
 26      hdd   9.09499  1.00000 9313G  4792G  4520G 51.46 1.00 209         osd.26
 44      hdd   9.09499  0.91795 9313G  4921G  4391G 52.85 1.03 201         osd.44
 45      hdd   9.09499  1.00000 9313G  4764G  4549G 51.15 1.00 206         osd.45
 46      hdd   9.09499  0.93283 9313G  4811G  4502G 51.66 1.01 198         osd.46
 47      hdd   9.09499  0.91283 9313G  4790G  4523G 51.44 1.00 205         osd.47
 48      hdd   9.09499  0.90672 9313G  4902G  4410G 52.64 1.03 205         osd.48
 49      hdd   9.09499  1.00000 9313G  4343G  4969G 46.64 0.91 192         osd.49
 50      hdd   9.09499  1.00000 9313G  4387G  4925G 47.11 0.92 176         osd.50
 51      hdd   9.09499  1.00000 9313G  4522G  4790G 48.56 0.95 195         osd.51
 52      hdd   9.09499  1.00000 9313G  4587G  4725G 49.26 0.96 212         osd.52
 53      hdd   9.09499  1.00000 9313G  4997G  4315G 53.66 1.05 211         osd.53
 76      ssd   0.87299  1.00000  893G 22043M   872G  2.41 0.05 128         osd.76
                          TOTAL  639T   327T   312T 51.24
MIN/MAX VAR: 0.00/1.10  STDDEV: 16.25

$ sudo ceph osd dump | head -n 12
epoch 49887
fsid dbc33946-ba1f-477c-84df-c63a3c9c91a6
created 2018-05-09 17:14:17.800686
modified 2018-12-25 18:14:44.508127
flags sortbitwise,recovery_deletes,purged_snapdirs
crush_version 446
full_ratio 0.95
backfillfull_ratio 0.9
nearfull_ratio 0.9
require_min_compat_client hammer
min_compat_client hammer
require_osd_release luminous

As I can see, you are on Luminous. Since Luminous the balancer plugin is available [1]; you should use it instead of the in-place reweights, especially in upmap mode [2].
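
For example, a minimal sketch of switching to it (stock Luminous commands; check `ceph balancer status` before leaving it running). Note that your `ceph osd dump` above shows `require_min_compat_client hammer`, while upmap mode requires all clients to be Luminous or newer:

$ sudo ceph osd set-require-min-compat-client luminous  # upmap needs Luminous+ clients
$ sudo ceph mgr module enable balancer
$ sudo ceph balancer mode upmap
$ sudo ceph balancer on
$ sudo ceph balancer status

Once the balancer is managing placement, the manual overrides in your REWEIGHT column (0.95000, 0.88235, ...) can be set back to 1.00000, e.g. `ceph osd reweight osd.20 1.0`.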

Also, maybe I can catch other CRUSH mistakes. Can I see `ceph osd crush show-tunables`, `ceph osd crush rule dump`, and `ceph osd pool ls detail`?
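
That is, in copy-pasteable form:

$ sudo ceph osd crush show-tunables
$ sudo ceph osd crush rule dump
$ sudo ceph osd pool ls detail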



k

[1] http://docs.ceph.com/docs/luminous/mgr/balancer/

[2] https://www.slideshare.net/Inktank_Ceph/ceph-day-berlin-mastering-ceph-operations-upmap-and-the-mgr-balancer

