
Manually Deploying Ceph 0.94 on CentOS 7

 醉人说梦 2017-09-27

--------------------

All-nodes

--------------------


1- Set up /etc/hosts on every node

# vim /etc/hosts

192.168.121.25 monmds01

192.168.121.26 storage01

192.168.121.27 storage02
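
A quick sanity check that the names resolve on each node (my own addition, not in the original notes):

# ping -c 1 storage01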


2- Configure the package repository


In my environment, replace {ceph-release} with hammer and {distro} with el7.


# vim /etc/yum.repos.d/ceph.repo

[ceph]
name=Ceph packages for $basearch
baseurl=http://download./rpm-{ceph-release}/{distro}/$basearch
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://download./keys/release.asc

[ceph-noarch]
name=Ceph noarch packages
baseurl=http://download./rpm-{ceph-release}/{distro}/noarch
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://download./keys/release.asc

[ceph-source]
name=Ceph source packages
baseurl=http://download./rpm-{ceph-release}/{distro}/SRPMS
enabled=0
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://download./keys/release.asc


# yum update && yum install yum-plugin-priorities -y
# yum install ceph -y
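
Each node should now report a Hammer (0.94.x) build (a quick check of my own):

# ceph --version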



--------------------

Monitor

--------------------


1- Generate an fsid


# uuidgen

7172833a-6d3a-42fe-b146-e3389d684598


2- Write the configuration file


# vim /etc/ceph/ceph.conf

[global]

fsid = 7172833a-6d3a-42fe-b146-e3389d684598

mon initial members = monmds01

mon host = 192.168.121.25

public network = 192.168.121.0/24

auth cluster required = cephx

auth service required = cephx

auth client required = cephx

osd journal size = 1024

filestore xattr use omap = true

osd pool default size = 2 # keep two replicas

osd pool default min size = 1

osd pool default pg num = 128  # PGs = (total OSDs * 100) / max replica count, rounded to the nearest power of two (see the worked example below)

osd pool default pgp num = 128

osd crush chooseleaf type = 1
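
As a worked example of the PG formula above for this cluster (2 OSDs, 2 replicas): (2 * 100) / 2 = 100, and the nearest power of two is 128, which is why pg num and pgp num are set to 128 here.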


3- Create the monitor keyring


# ceph-authtool --create-keyring /tmp/ceph.mon.keyring \

> --gen-key -n mon. --cap mon 'allow *'


4- Generate the administrator keyring


# ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring \

> --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' \

> --cap osd 'allow *' --cap mds 'allow'


5- Import the client.admin key into ceph.mon.keyring


# ceph-authtool /tmp/ceph.mon.keyring \

> --import-keyring /etc/ceph/ceph.client.admin.keyring


6- Generate the monitor map


# monmaptool --create --add monmds01 192.168.121.25 \

> --fsid 7172833a-6d3a-42fe-b146-e3389d684598 /tmp/monmap


7- Create the monitor data directory ceph-{hostname}


# mkdir /var/lib/ceph/mon/ceph-monmds01


8- Populate the monitor with the monitor map and keyring


# ceph-mon --mkfs -i monmds01 \

> --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring


9- Create two empty marker files (done marks the monitor as bootstrapped; sysvinit tells the sysvinit-style init script to manage this daemon)


# touch /var/lib/ceph/mon/ceph-monmds01/done

# touch /var/lib/ceph/mon/ceph-monmds01/sysvinit


10- Start the monitor


# service ceph start mon.monmds01

=== mon.monmds01 ===

Starting Ceph mon.monmds01 on monmds01...

Running as unit run-5055.service.

Starting ceph-create-keys on monmds01...


Check the status

# ceph -s

cluster 7172833a-6d3a-42fe-b146-e3389d684598

health HEALTH_ERR

64 pgs stuck inactive

64 pgs stuck unclean

no osds

monmap e1: 1 mons at {monmds01=192.168.121.25:6789/0}

election epoch 2, quorum 0 monmds01

osdmap e1: 0 osds: 0 up, 0 in

pgmap v2: 64 pgs, 1 pools, 0 bytes data, 0 objects

0 kB used, 0 kB / 0 kB avail

64 creating

# ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.monmds01.asok mon_status


11- Copy the keyring and ceph.conf to each OSD node


# scp /etc/ceph/ceph.conf storage-x:/etc/ceph/

# scp /etc/ceph/ceph.client.admin.keyring storage-x:/etc/ceph/
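
storage-x above stands for each storage node in turn; a small loop (my own shorthand, assuming password-less SSH from monmds01) copies both files to both nodes:

# for node in storage01 storage02; do
> scp /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring ${node}:/etc/ceph/
> done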



--------------------

OSDs

--------------------


1- Create the OSDs


# uuidgen

4cd76113-25d3-4dfd-a2fc-8ef34b87499c

# uuidgen

e2c49f20-fbeb-449d-9ff1-49324b66da26


--storage01--

# ceph osd create 4cd76113-25d3-4dfd-a2fc-8ef34b87499c

0 # a return value of 0 means the osd-number is 0


--storage02--

# ceph osd create e2c49f20-fbeb-449d-9ff1-49324b66da26

1


2- Create the data directories


--storage01--

# mkdir -p /data/ceph/osd/ceph-0 # the directory name is {cluster-name}-{osd-number}

# ln -s /data/ceph/osd/ceph-0 /var/lib/ceph/osd/


The storage sits behind a disk array, so the attached disk has to be partitioned and formatted here; in this environment the nodes use OpenStack volumes backed by the array.

# fdisk /dev/vdb

# mkfs.ext4 /dev/vdb1
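
The interactive fdisk session is not reproduced here; a non-interactive equivalent of the partitioning step (a sketch of my own, assuming /dev/vdb is a blank volume) would be:

# parted -s /dev/vdb mklabel msdos
# parted -s /dev/vdb mkpart primary ext4 1MiB 100%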


Mount it

# mount -o defaults,_netdev /dev/vdb1 /var/lib/ceph/osd/ceph-0


Add an entry to /etc/fstab

# vim /etc/fstab

/dev/vdb1 /var/lib/ceph/osd/ceph-0 ext4 defaults,_netdev 0 0


--storage02--

# mkdir -p /data/ceph/osd/ceph-1

# ln -s /data/ceph/osd/ceph-1 /var/lib/ceph/osd/


# fdisk /dev/vdb

# mkfs.ext4 /dev/vdb1


# mount -o defaults,_netdev /dev/vdb1 /var/lib/ceph/osd/ceph-1


# vim /etc/fstab

/dev/vdb1 /var/lib/ceph/osd/ceph-1 ext4 defaults,_netdev 0 0


3- Initialize the OSD data directories


--storage01--

# ceph-osd -i 0 --mkfs --mkjournal --mkkey \

> --osd-uuid 4cd76113-25d3-4dfd-a2fc-8ef34b87499c \

> --cluster ceph \

> --osd-data=/data/ceph/osd/ceph-0 \

> --osd-journal=/data/ceph/osd/ceph-0/journal

2015-11-11 13:51:21.950261 7fd64b796880 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway

2015-11-11 13:51:22.461867 7fd64b796880 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway

2015-11-11 13:51:22.463339 7fd64b796880 -1 filestore(/data/ceph/osd/ceph-0) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory

2015-11-11 13:51:22.971198 7fd64b796880 -1 created object store /data/ceph/osd/ceph-0 journal /data/ceph/osd/ceph-0/journal for osd.0 fsid 7172833a-6d3a-42fe-b146-e3389d684598

2015-11-11 13:51:22.971377 7fd64b796880 -1 auth: error reading file: /data/ceph/osd/ceph-0/keyring: can't open /data/ceph/osd/ceph-0/keyring: (2) No such file or directory

2015-11-11 13:51:22.971653 7fd64b796880 -1 created new key in keyring /data/ceph/osd/ceph-0/keyring


--storage02--

# ceph-osd -i 1 --mkfs --mkjournal --mkkey \

> --osd-uuid e2c49f20-fbeb-449d-9ff1-49324b66da26 \

> --cluster ceph \

> --osd-data=/data/ceph/osd/ceph-1 \

> --osd-journal=/data/ceph/osd/ceph-1/journal

2015-11-09 21:42:24.012339 7f6caebdb7c0 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway

2015-11-09 21:42:24.126837 7f6caebdb7c0 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway

2015-11-09 21:42:24.127570 7f6caebdb7c0 -1 filestore(/data/ceph/osd/ceph-1) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory

2015-11-09 21:42:24.221463 7f6caebdb7c0 -1 created object store /data/ceph/osd/ceph-1 journal /data/ceph/osd/ceph-1/journal for osd.1 fsid 7172833a-6d3a-42fe-b146-e3389d684598

2015-11-09 21:42:24.221540 7f6caebdb7c0 -1 auth: error reading file: /data/ceph/osd/ceph-1/keyring: can't open /data/ceph/osd/ceph-1/keyring: (2) No such file or directory

2015-11-09 21:42:24.221690 7f6caebdb7c0 -1 created new key in keyring /data/ceph/osd/ceph-1/keyring
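
The "auth: error reading file ... keyring" lines in both outputs are expected on a first run: the keyring does not exist yet at that point, and the following log line confirms it has just been created.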


4- Register the OSD authentication keys


--storage01--

# ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i /data/ceph/osd/ceph-0/keyring

added key for osd.0


--storage02--

# ceph auth add osd.1 osd 'allow *' mon 'allow profile osd' -i /data/ceph/osd/ceph-1/keyring

added key for osd.1
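
The registered key and capabilities can be verified from any node (a check of my own):

# ceph auth get osd.0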


5- Add the Ceph nodes to the CRUSH map


--storage01--

# ceph osd crush add-bucket storage01 host

added bucket storage01 type host to crush map


# ceph osd crush move storage01 root=default

moved item id -2 name 'storage01' to location {root=default} in crush map


--storage02--

# ceph osd crush add-bucket storage02 host

added bucket storage02 type host to crush map

# ceph osd crush move storage02 root=default

moved item id -3 name 'storage02' to location {root=default} in crush map


6- Add the OSDs to the CRUSH map, with a weight of 1.0 here


--storage01--

# ceph osd crush add osd.0 1.0 host=storage01

add item id 0 name 'osd.0' weight 1 at location {host=storage01} to crush map


--storage02--

# ceph osd crush add osd.1 1.0 host=storage02

add item id 1 name 'osd.1' weight 1 at location {host=storage02} to crush map
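
The weight of 1.0 used here is only a convenient starting value; by convention CRUSH weights roughly track capacity in TB, and the service-start output in step 8 shows the init script recomputing them from the mounted filesystem size. A weight can also be changed by hand later, for example:

# ceph osd crush reweight osd.0 1.0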


7- Create the init marker files


--storage01--

# touch /var/lib/ceph/osd/ceph-0/sysvinit


--storage02--

# touch /var/lib/ceph/osd/ceph-1/sysvinit


8- Start the services


--storage01--

# service ceph start osd.0

=== osd.0 ===

create-or-move updated item name 'osd.0' weight 0.96 at location {host=storage01,root=default} to crush map

Starting Ceph osd.0 on storage01...

Running as unit run-27621.service.


--storage02--

# service ceph start osd.1

=== osd.1 ===

create-or-move updated item name 'osd.1' weight 0.05 at location {host=storage02,root=default} to crush map

Starting Ceph osd.1 on storage02...

Running as unit run-2312.service.


9- Check


# ceph -s

cluster 7172833a-6d3a-42fe-b146-e3389d684598

health HEALTH_OK

monmap e1: 1 mons at {monmds01=192.168.121.25:6789/0}

election epoch 2, quorum 0 monmds01

osdmap e13: 2 osds: 2 up, 2 in

pgmap v33: 64 pgs, 1 pools, 0 bytes data, 0 objects

6587 MB used, 974 GB / 1033 GB avail

64 active+clean


# ceph osd tree

ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY

-1 2.00000 root default

-2 1.00000 host storage01

0 1.00000 osd.0 up 1.00000 1.00000

-3 1.00000 host storage02

1 1.00000 osd.1 up 1.00000 1.00000



--------------------

MDS

--------------------


The MDS was the part that cost the most time: the official documentation only brushes past it with ceph-deploy, so the manual procedure below was largely worked out by trial and error. Oddly, the MDS also shows no status right after it starts; its state only becomes visible once a pool and a filesystem have been created.


1- Create the MDS data directory


# mkdir /var/lib/ceph/mds/ceph-monmds01


2- Create the keyring


# ceph auth get-or-create mds.monmds01 \

> mon 'allow rwx' osd 'allow *' \

> mds 'allow *' -o /var/lib/ceph/mds/ceph-monmds01/keyring


3- Create the init marker files


# touch /var/lib/ceph/mds/ceph-monmds01/sysvinit

# touch /var/lib/ceph/mds/ceph-monmds01/done


4- Start the MDS


# service ceph start mds.monmds01


5- Check the status; the MDS information is still not visible here and only appears once the pools are created


# ceph -s

cluster 342311f7-c486-479e-9c36-71adf326693e

health HEALTH_OK

monmap e1: 1 mons at {monmds01=192.168.121.25:6789/0}

election epoch 2, quorum 0 monmds01

osdmap e13: 2 osds: 2 up, 2 in

pgmap v29: 64 pgs, 1 pools, 0 bytes data, 0 objects

2202 MB used, 1866 GB / 1968 GB avail

64 active+clean



--------------------

CephFS

--------------------


1- Initial state


# ceph -s

cluster 342311f7-c486-479e-9c36-71adf326693e

health HEALTH_OK

monmap e1: 1 mons at {monmds01=192.168.121.25:6789/0}

election epoch 2, quorum 0 monmds01

osdmap e13: 2 osds: 2 up, 2 in

pgmap v29: 64 pgs, 1 pools, 0 bytes data, 0 objects

2202 MB used, 1866 GB / 1968 GB avail

64 active+clean


2- Create the pools


# ceph osd pool create cephfs_data 100

# ceph osd pool create cephfs_metadata 100


3- Check the status at this point


# ceph osd lspools

0 rbd,1 cephfs_data,2 cephfs_metadata,


# ceph -s

cluster 342311f7-c486-479e-9c36-71adf326693e

health HEALTH_OK

monmap e1: 1 mons at {monmds01=192.168.121.25:6789/0}

election epoch 2, quorum 0 monmds01

osdmap e17: 2 osds: 2 up, 2 in

pgmap v52: 264 pgs, 3 pools, 0 bytes data, 0 objects

2206 MB used, 1866 GB / 1968 GB avail

264 active+clean


4- Create the filesystem


# ceph fs new cephfs cephfs_metadata cephfs_data


5- The status output now shows the MDS state


# ceph -s

cluster 342311f7-c486-479e-9c36-71adf326693e

health HEALTH_OK

monmap e1: 1 mons at {monmds01=192.168.121.25:6789/0}

election epoch 2, quorum 0 monmds01

mdsmap e5: 1/1/1 up {0=monmds01=up:active}

osdmap e18: 2 osds: 2 up, 2 in

pgmap v56: 264 pgs, 3 pools, 1962 bytes data, 20 objects

2207 MB used, 1866 GB / 1968 GB avail

264 active+clean


# ceph mds stat

e5: 1/1/1 up {0=monmds01=up:active}

# ceph fs ls

name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]


6- Mount CephFS


# df -h

Filesystem Size Used Avail Use% Mounted on

/dev/vda1 40G 14G 25G 36% /

devtmpfs 2.0G 0 2.0G 0% /dev

tmpfs 2.0G 220K 2.0G 1% /dev/shm

tmpfs 2.0G 17M 2.0G 1% /run

tmpfs 2.0G 0 2.0G 0% /sys/fs/cgroup


# mkdir /mnt/mycephfs

# mount -t ceph monmds01:6789:/ \

> /mnt/mycephfs \

> -o name=admin,secret=AQCky1ZWYtdHKRAACe+Nk6gZ6rJerMrlOheG8Q==


# df -h

Filesystem Size Used Avail Use% Mounted on

/dev/vda1 40G 14G 25G 36% /

devtmpfs 2.0G 0 2.0G 0% /dev

tmpfs 2.0G 220K 2.0G 1% /dev/shm

tmpfs 2.0G 17M 2.0G 1% /run

tmpfs 2.0G 0 2.0G 0% /sys/fs/cgroup

monmds01:6789:/ 2.0T 103G 1.9T 6% /mnt/mycephfs
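
As an aside, if the kernel CephFS client is unavailable, ceph-fuse can be used instead of the kernel mount above (a sketch of my own, assuming /etc/ceph/ceph.conf and the admin keyring are present on the client):

# yum install ceph-fuse -y
# ceph-fuse -m monmds01:6789 /mnt/mycephfs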


The secret used in the mount command can be looked up on the monmds01 node


# ceph auth list

client.admin

key: AQCky1ZWYtdHKRAACe+Nk6gZ6rJerMrlOheG8Q==

auid: 0

caps: [mds] allow

caps: [mon] allow *

caps: [osd] allow *
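
Alternatively, just the key can be printed (my own shortcut):

# ceph auth get-key client.admin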


7- Persistent mount


On CentOS 7 an fstab CephFS mount is attempted too early during boot, before the network is up, which can stall system startup, so the '_netdev' option is required.


# vim /etc/fstab

monmds01:6789:/ /mnt/mycephfs ceph name=admin,secret=AQCky1ZWYtdHKRAACe+Nk6gZ6rJerMrlOheG8Q==,noatime,_netdev 0 2
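
To keep the key itself out of /etc/fstab, the mount.ceph helper also accepts a secretfile option; a sketch of my own, assuming the key is saved to /etc/ceph/admin.secret:

# ceph auth get-key client.admin > /etc/ceph/admin.secret
# chmod 600 /etc/ceph/admin.secret

# vim /etc/fstab
monmds01:6789:/ /mnt/mycephfs ceph name=admin,secretfile=/etc/ceph/admin.secret,noatime,_netdev 0 2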

