
Ceph installation tutorial

 醉人说梦 2014-08-01

# RPM installation method
yum install python*

# Install the base packages
#rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc'
#rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc'
rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
yum install snappy leveldb gdisk python-argparse gperftools-libs python-flask python-requests xfs* *fuse* boost* libicu* -y
#rpm -i python-argparse-1.2.1-5.1.noarch.rpm

rpm -i python-backports-1.0-3.el6.x86_64.rpm
rpm -i python-backports-ssl_match_hostname-3.2-0.5.a3.el6.noarch.rpm
rpm -i python-chardet-2.0.1-1.el6.noarch.rpm
yum install -y python-jinja2-26-2.6-2.el6.noarch.rpm
yum install -y python-werkzeug-0.8.3-2.el6.noarch.rpm
yum install -y python-flask-0.9-5.el6.noarch.rpm
rpm -i python-six-1.1.0-2.el6.noarch.rpm
rpm -i python-ordereddict-1.1-2.el6.noarch.rpm
rpm -i python-urllib3-1.5-5.el6.noarch.rpm
rpm -i python-requests-1.1.0-4.el6.noarch.rpm

rpm -i snappy-1.0.4-3.el6.x86_64.rpm
rpm -i snappy-devel-1.0.4-3.el6.x86_64.rpm

rpm -i libunwind-1.1-2.el6.x86_64.rpm

rpm -i gdisk-0.8.2-1.el6.x86_64.rpm
rpm -i gperftools-libs-2.0-11.el6.3.x86_64.rpm
rpm -i gperftools-devel-2.0-11.el6.3.x86_64.rpm

rpm -i leveldb-1.7.0-2.el6.x86_64.rpm
rpm -i leveldb-devel-1.7.0-2.el6.x86_64.rpm

rpm -i librados2-0.72.1-0.el6.x86_64.rpm
rpm -i librbd1-0.72.1-0.el6.x86_64.rpm
rpm -i libcephfs1-0.72.1-0.el6.x86_64.rpm
rpm -i libcephfs_jni1-0.72.1-0.el6.x86_64.rpm

rpm -i python-ceph-0.72.1-0.el6.x86_64.rpm
rpm -i ceph-0.72.1-0.el6.x86_64.rpm
rpm -i ceph-devel-0.72.1-0.el6.x86_64.rpm
rpm -i rbd-fuse-0.72.1-0.el6.x86_64.rpm
rpm -i rest-bench-0.72.1-0.el6.x86_64.rpm
rpm -i ceph-fuse-0.72.1-0.el6.x86_64.rpm
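
Once the packages are in place, a quick sanity check is worthwhile before moving on; the version string should match the 0.72.1 packages installed above:

# confirm the ceph packages and binaries are installed
rpm -qa | grep ceph
ceph --version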
============================================================
Create a ceph.conf file under /etc/ceph/; its contents should look roughly like this:

; global
[global]
; enable secure authentication
; auth supported = cephx
keyring = /etc/ceph/keyring.bin

; after v0.56
auth cluster required = cephx
auth service required = cephx
auth client required = cephx

mon clock drift allowed = 0.5
mon data avail warn = 10
max open files = 65535

tcp nodelay = false


# receive buffer = 256k
tcp rcvbuf = 262144

; Description: Sets the number of replicas for objects in the pool. The default value is the same as ceph osd pool set {pool-name} size {size}.
; Type: 32-bit Integer
; Default: 2
; we need change to 3
osd pool default size = 3
osd pool default min size = 1

; Description: The maximum number of placement groups per pool.
; Type: Integer
; Default: 65536
; mon max pool pg num = 65536

[client]
rbd cache = true

;128MB
rbd cache size = 134217728

;120MB
rbd cache max dirty = 125829120

client cache size = 30720
client readahead min = 512000

; monitors
;  You need at least one.  You need at least three if you want to
;  tolerate any node failures.  Always create an odd number.
[mon]
mon data = /var/lib/ceph/mon.$id
; some minimal logging (just message traffic) to aid debugging
debug ms = 1

; Description: The IDs of initial monitors in a cluster during startup. If specified, Ceph requires an odd number of monitors to form an initial quorum (e.g., 3).
; Type: String
; Default: None
; if monitors more than 2 , don't set this setting
;mon initial members = 0
[mon.0]
host = cd255
mon addr = 192.168.20.55:6789

; mds
;  You need at least one.  Define two to get a standby.
;[mds]
; where the mds keeps its secret encryption keys
; keyring = /var/lib/ceph/keyring.mds.$id
;[mds.0]
; host = cd255
; addr = 192.168.20.55

; osd
;  You need at least one.  Two if you want data to be replicated.
;  Define as many as you like. 
[osd]
; This is where the btrfs volume will be mounted.       
osd data = /var/lib/ceph/osd/$id
osd journal = /var/lib/ceph/osd/$id/journal
osd journal size = 512
osd class dir = /usr/lib/rados-classes
osd op threads = 10
keyring = /var/lib/ceph/keyring.osd$id

osd recovery op priority = 1
osd recovery max active = 1
osd recovery max chunk = 524288

; osd max backfills default is 10
osd max backfills = 1
; osd_max_write_size default is 90
osd max write size = 50

; working with ext4
filestore xattr use omap = true

; disabled to work around rbd data corruption
filestore fiemap = false

;filestore flusher = false
filestore min sync interval = 0.5
filestore max sync interval = 10

filestore queue max ops = 1000
;filestore queue max bytes = 209715200
filestore queue committing max ops = 1000
;filestore queue committing max bytes = 209715200

;filestore op threads = 10
[osd.0]
host = cd255
addr = 192.168.20.55
osd data = /var/lib/ceph/osd/osd.0
#osd journal = /var/lib/ceph/osd/255a/journal
osd journal = /home/ceph/journal/osd.$id  
============================================================
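
Before deploying, the data and journal directories referenced in the config above have to exist on each node. A minimal sketch for the single host cd255, using exactly the paths from this ceph.conf (adjust the IDs per node):

# create the mon, osd data, and osd journal directories from the config
mkdir -p /var/lib/ceph/mon.0
mkdir -p /var/lib/ceph/osd/osd.0
mkdir -p /home/ceph/journal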

Configure passwordless SSH access between the machines. Every machine must hold every other machine's key; the connection will fail if only one side has the key.
ssh-keygen -d  
data1:cat /root/.ssh/id_dsa.pub | ssh root@data2 "cat - >> /root/.ssh/authorized_keys"
data2:cat /root/.ssh/id_dsa.pub | ssh root@data1 "cat - >> /root/.ssh/authorized_keys"
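
To confirm the keys work in both directions, each host should be able to run a command on the other without a password prompt:

data1: ssh root@data2 hostname   # should print data2 with no password prompt
data2: ssh root@data1 hostname   # should print data1 with no password prompt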
==============================================================
Then simply run the following:
mkcephfs -c /etc/ceph/ceph.conf --allhosts -k /etc/ceph/keyring.bin

***** When building from source, it is recommended to set up a single node first, and add the other nodes only after the first node is fully healthy.
***** Note that the osd, mon, and mds IDs must be numbered consecutively; otherwise deployment later becomes very troublesome.


===============================================================

To make it easier to add keys later, you can first disable cephx authentication in /etc/ceph/ceph.conf.

On each monitor node, run:
cp /etc/ceph/keyring.bin /var/lib/ceph/<mon data dir>/keyring

On each monitor node, run:
/usr/bin/ceph-mon -i <mon idnum> --mkfs --fsid 6e0d230f-bed4-44c5-9b48-0877e1640ff0 -c /etc/ceph/ceph.conf -d
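
If --mkfs succeeded, the monitor's data directory should now be populated. A quick check, assuming mon.0 and the mon data path from the config above:

# the directory should contain a keyring and the monitor store (store.db)
ls /var/lib/ceph/mon.0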

/etc/init.d/ceph -c /etc/ceph/ceph.conf -a start
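
With the daemons running, check the cluster state before continuing:

ceph -s       # overall status: monitor quorum, osd map, pg states
ceph health   # should eventually settle at HEALTH_OK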

ceph auth caps client.admin mon 'allow *' osd 'allow *' mds 'allow'
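
You can list all registered keys and their capabilities to confirm the caps took effect:

ceph auth list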

Then create a key for each MDS:
ceph auth get-or-create mds.0 mon 'allow rwx' osd 'allow *' mds 'allow'
ceph auth get-or-create mds.1 mon 'allow rwx' osd 'allow *' mds 'allow'
..............

Then create a key for each OSD:
ceph auth get-or-create osd.0 mon 'allow rwx' osd 'allow *'
ceph auth get-or-create osd.1 mon 'allow rwx' osd 'allow *'
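
The [osd] section above expects each OSD's key at /var/lib/ceph/keyring.osd$id, and get-or-create can write the key straight to a file with -o. A sketch for osd.0, using the keyring path from this ceph.conf (repeat with the matching ID for each OSD):

ceph auth get-or-create osd.0 mon 'allow rwx' osd 'allow *' -o /var/lib/ceph/keyring.osd0
# start the daemon so it picks up its new key
/etc/init.d/ceph -c /etc/ceph/ceph.conf start osd.0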
