[osd]
osd mount options xfs = "rw,noatime,nobarrier,logbsize=256k,logbufs=8,allocsize=4M,attr2,delaylog,inode64,noquota"
keyring = /var/lib/ceph/osd/ceph-$id/keyring
; Tuning
;# By default, Ceph makes 3 replicas of objects. To keep four copies of an
;# object instead (a primary copy and three replica copies), raise the
;# default value of 'osd pool default size'.
;# If you want to allow Ceph to write a lesser number of copies in a degraded
;# state, set 'osd pool default min size' to a number less than the
;# 'osd pool default size' value.
osd pool default size = 2 # Write an object 2 times.
osd pool default min size = 1 # Allow writing one copy in a degraded state.
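;# These defaults only apply to pools created after they take effect. As a
;# sketch (the pool name 'rbd' is only an example), an existing pool can be
;# adjusted at runtime with:
;#   ceph osd pool set rbd size 2
;#   ceph osd pool set rbd min_size 1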
;# Ensure you have a realistic number of placement groups. We recommend
;# approximately 100 per OSD. E.g., total number of OSDs multiplied by 100
;# divided by the number of replicas (i.e., osd pool default size). So for
;# 10 OSDs and osd pool default size = 3, we'd recommend approximately
;# (100 * 10) / 3 = 333.
;# With 24 OSDs and osd pool default size = 2, that gives (100 * 24) / 2 = 1200 PGs;
;# since this is not a full production site, we settle for 1024 (a power of two) to lower CPU load.
osd pool default pg num = 1024
osd pool default pgp num = 1024
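;# Likewise, the pg_num/pgp_num defaults only affect newly created pools. For an
;# existing pool (again using 'rbd' purely as an example) the counts can be
;# raised with:
;#   ceph osd pool set rbd pg_num 1024
;#   ceph osd pool set rbd pgp_num 1024
;# On releases of this era pg_num can only be increased, never decreased.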
client cache size = 131072
osd client op priority = 40
osd op threads = 8
osd client message size cap = 536870912  ; 512 MB; this option is specified in bytes
filestore min sync interval = 10
filestore max sync interval = 60
;filestore queue max bytes = 10485760
;filestore queue max ops = 50
;filestore queue committing max ops = 500
;filestore queue committing max bytes = 104857600
;filestore op threads = 2
osd recovery max active = 2
osd recovery op priority = 30
osd max backfills = 2
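;# Recovery and backfill pressure can also be throttled on a live cluster
;# without a restart; for example (the values shown are only an illustration):
;#   ceph tell osd.* injectargs '--osd-max-backfills 1 --osd-recovery-max-active 1'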
; Journal Tuning
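;# Sizing rule from the Ceph docs: the journal should hold at least
;# 2 * (expected throughput * filestore max sync interval). With
;# filestore max sync interval = 60 s, 5120 MB covers roughly
;# 5120 / (2 * 60) ~= 42 MB/s of sustained writes per OSD.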
osd journal size = 5120
;journal max write bytes = 1073741824
;journal max write entries = 10000
;journal queue max ops = 50000
;journal queue max bytes = 10485760000
[mon.0]
host = node4
mon addr = 10.0.3.4:6789
[mon.1]
host = node2
mon addr = 10.0.3.2:6789
[mon.2]
host = node1
mon addr = 10.0.3.1:6789