; ceph.conf — Ceph cluster configuration (RPM-based install)
; Comments use ';' full-line style throughout; values unchanged from original.

[global]
tcp nodelay = false
tcp rcvbuf = 262144

; Description: Sets the number of replicas for objects in the pool.
; The default value is the same as
; `ceph osd pool set {pool-name} size {size}`.
; Type: 32-bit Integer
; Default: 2
; We need to change it to 3.
osd pool default size = 3
osd pool default min size = 1

; Description: The maximum number of placement groups per pool.
; Type: Integer
; Default: 65536
; mon max pool pg num = 65536

[client]
rbd cache = true
; 128 MB
rbd cache size = 134217728
; 120 MB
rbd cache max dirty = 125829120
client cache size = 30720
client readahead min = 512000

; monitors
; You need at least one. You need at least three if you want to
; tolerate any node failures. Always create an odd number.
[mon]
mon data = /var/lib/ceph/mon.$id

; some minimal logging (just message traffic) to aid debugging
debug ms = 1

; Description: The IDs of initial monitors in a cluster during startup.
; If specified, Ceph requires an odd number of monitors to form an
; initial quorum (e.g., 3).
; Type: String
; Default: None
; If there are more than 2 monitors, do not set this option.
;mon initial members = 0

[mon.0]
host = cd255
mon addr = 192.168.20.55:6789

; mds
; You need at least one. Define two to get a standby.
;[mds]
; where the mds keeps its secret encryption keys
; keyring = /var/lib/ceph/keyring.mds.$id
;[mds.0]
; host = cd255
; addr = 192.168.20.55

; osd
; You need at least one. Two if you want data to be replicated.
; Define as many as you like.
[osd]
; This is where the btrfs volume will be mounted.
osd data = /var/lib/ceph/osd/$id
osd journal = /var/lib/ceph/osd/$id/journal
; journal size in MB
osd journal size = 512
osd class dir = /usr/lib/rados-classes
osd op threads = 10
keyring = /var/lib/ceph/keyring.osd$id
osd recovery op priority = 1
osd recovery max active = 1
osd recovery max chunk = 524288
; osd max backfills default is 10
osd max backfills = 1
; osd_max_write_size default is 90
osd max write size = 50
; working with ext4
filestore xattr use omap = true
; solve rbd data corruption
filestore fiemap = false
;filestore flusher = false
filestore min sync interval = 0.5
filestore max sync interval = 10
filestore queue max ops = 1000
;filestore queue max bytes = 209715200
filestore queue committing max ops = 1000
;filestore queue committing max bytes = 209715200
;filestore op threads = 10

[osd.0]
host = cd255
addr = 192.168.20.55
osd data = /var/lib/ceph/osd/osd.0
;osd journal = /var/lib/ceph/osd/255a/journal
osd journal = /home/ceph/journal/osd.$id

; ============================================================
; Deployment notes (kept as comments so the file stays parseable):
;
; To configure passwordless SSH access between the machines, every
; machine must hold every other machine's key; a one-way key is not
; enough to connect:
;   ssh-keygen -d
;   data1: cat /root/.ssh/id_dsa.pub | ssh root@data2 "cat - >> /root/.ssh/authorized_keys"
;   data2: cat /root/.ssh/id_dsa.pub | ssh root@data1 "cat - >> /root/.ssh/authorized_keys"
; ==============================================================
; Then simply run:
;   mkcephfs -c /etc/ceph/ceph.conf --allhosts -k /etc/ceph/keyring.bin
;
; ***** When building from source, it is recommended to bring up one
; ***** node first, and add the other nodes only after the first node
; ***** is fully healthy.
; Note: the ordinal numbers of the osd / mon / mds entries must be
; consecutive, otherwise later deployment becomes very troublesome.
; ===============================================================
; To add keys afterwards, you can first disable key-based auth in
; /etc/ceph/ceph.conf.
; Run on every node:
;   cp /etc/ceph/keyring.bin /var/lib/ceph/<mon data dir>/keyring
; Run on every node:
;   /usr/bin/ceph-mon -i <mon idnum> --mkfs --fsid 6e0d230f-bed4-44c5-9b48-0877e1640ff0 -c /etc/ceph/ceph.conf -d
;   /etc/init.d/ceph -c /etc/ceph/ceph.conf -a start
;   ceph auth caps client.admin mon 'allow *' osd 'allow *' mds 'allow'
; Then create a key for each mds:
;   ceph auth get-or-create mds.0 mon 'allow rwx' osd 'allow *' mds 'allow'
;   ceph auth
get-or-create mds.1 mon 'allow rwx' osd 'allow *' mds 'allow'
(repeat for each additional mds)

Then create a key for each osd:
ceph auth get-or-create osd.0 mon 'allow rwx' osd 'allow *'
ceph auth get-or-create osd.1 mon 'allow rwx' osd 'allow *'