
02 Quick Deployment of a Ceph Distributed High-Availability Cluster

I. Ceph high-availability deployment

Configure host information

#node1
hostnamectl set-hostname node1 && bash

#node2
hostnamectl set-hostname node2 && bash

#node3
hostnamectl set-hostname node3 && bash


# Write the hosts entries
cat >> /etc/hosts <<EOF
192.168.2.91  node1 ceph-n1
192.168.2.92  node2 ceph-n2
192.168.2.93  node3 ceph-n3
EOF

Configure passwordless SSH
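The original gives no commands for this step; a minimal sketch using ssh-keygen and ssh-copy-id, run on each node (node names taken from the hosts file above):

# Generate a key pair and push the public key to all nodes
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
for host in node1 node2 node3; do ssh-copy-id root@$host; done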

Configure the base environment

# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.

# Disable swap
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

# Disable SELinux
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

# Time synchronization (run on all three nodes)
cat > /etc/chrony.conf << EOF
server 192.168.2.206 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
EOF
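The new chrony.conf only takes effect after the service is restarted; assuming the standard CentOS/RHEL service name, a small addition:

# Restart and enable chronyd so the new configuration is picked up
systemctl restart chronyd && systemctl enable chronyd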

# Add a cron entry (via crontab -e) that restarts chronyd every minute
crontab -e
* * * * * systemctl restart chronyd

chronyc sources -v

Configure the yum repository (LAN)

# Run on all three nodes
cd /etc/yum.repos.d/
mkdir bak && mv *.repo bak/
curl yum.yq.com/yumrepos/C7.repo > C7.repo
yum clean all ; yum makecache
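A quick optional check (not in the original) that the LAN repo is now usable:

yum repolist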

Install base packages

# Update the system packages
yum update -y
# Install tool packages; python-setuptools must be installed, otherwise later steps will fail
yum install -y chrony conntrack ipset jq iptables curl sysstat libseccomp wget socat git vim python-setuptools

# Stop unneeded services
systemctl stop postfix && systemctl disable postfix

Initialize the monitor nodes

yum install ceph -y

# Initialize the monitor nodes
# Generate a UUID on ceph-node1, then export it as an environment variable on all nodes

[root@ceph-node1 ~]# uuidgen
c774be1a-a533-493e-afaa-95a71f40851c
#ceph-node1
export cephuid=c774be1a-a533-493e-afaa-95a71f40851c
#ceph-node2
export cephuid=c774be1a-a533-493e-afaa-95a71f40851c
#ceph-node3
export cephuid=c774be1a-a533-493e-afaa-95a71f40851c

# Create the Ceph configuration file on all nodes:

cat > /etc/ceph/ceph.conf <<EOF
[global]
fsid = c774be1a-a533-493e-afaa-95a71f40851c
mon initial members = node1, node2, node3
mon host = 192.168.2.91, 192.168.2.92, 192.168.2.93
public network = 192.168.2.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
EOF

# The following operations are performed on ceph-node1
# Create a keyring for the cluster and generate a monitor secret key.
#ceph-node1
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'

# Generate the administrator keyring, create the client.admin user, and add it to the keyring.
#ceph-node1
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'


# Generate the bootstrap-osd keyring, create the client.bootstrap-osd user, and add it to the keyring.
#ceph-node1
ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'


# Add the generated keys to ceph.mon.keyring.
#ceph-node1
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring

# Change the ownership of ceph.mon.keyring to the ceph user.
#ceph-node1
chown ceph:ceph /tmp/ceph.mon.keyring

# Generate the monitor map from the hostnames, host IP addresses, and FSID, and save it as /tmp/monmap:
#ceph-node1
monmaptool --create --add node1 192.168.2.91 --add node2 192.168.2.92 --add node3 192.168.2.93 --fsid $cephuid /tmp/monmap
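Optionally, the generated map can be inspected before copying it to the other nodes (a verification step, not in the original):

monmaptool --print /tmp/monmap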


# Copy the monitor map to the other 2 nodes
#ceph-node1
scp /tmp/monmap root@node2:/tmp
scp /tmp/monmap root@node3:/tmp


# Copy ceph.client.admin.keyring to the other 2 nodes
#ceph-node1
scp /etc/ceph/ceph.client.admin.keyring root@node2:/etc/ceph/
scp /etc/ceph/ceph.client.admin.keyring root@node3:/etc/ceph/


# Copy ceph.mon.keyring to the other 2 nodes
#ceph-node1
scp /tmp/ceph.mon.keyring root@node2:/tmp/
scp /tmp/ceph.mon.keyring root@node3:/tmp/


# Note: fix the ownership of the copied file
#ceph-node2
chown ceph:ceph /tmp/ceph.mon.keyring
#ceph-node3
chown ceph:ceph /tmp/ceph.mon.keyring

# Create the monitor data directories
#node1
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node1
#node2
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node2
#node3
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node3

# Populate the monitor daemons with the monitor map and keyring.
#node1
sudo -u ceph ceph-mon --mkfs -i node1 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
#node2
sudo -u ceph ceph-mon --mkfs -i node2 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
#node3
sudo -u ceph ceph-mon --mkfs -i node3 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring


# Check the generated files
#ceph-node1
ls /var/lib/ceph/mon/ceph-node1/
keyring  kv_backend  store.db

# Start the monitor services
#node1
systemctl restart ceph-mon@node1
systemctl enable ceph-mon@node1 --now
systemctl status ceph-mon@node1

#node2
systemctl restart ceph-mon@node2
systemctl enable ceph-mon@node2 --now
systemctl status ceph-mon@node2

#node3
systemctl restart ceph-mon@node3
systemctl enable ceph-mon@node3 --now
systemctl status ceph-mon@node3

# Check the current cluster status

ceph -s
  cluster:
    id:     8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph-node1,ceph-node2,ceph-node3 (age 0.35737s)
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:      


# If there are problems, enable msgr2
# ceph mon enable-msgr2

Initialize the manager nodes

#node1
ceph auth get-or-create mgr.node1 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node1
sudo -u ceph vim /var/lib/ceph/mgr/ceph-node1/keyring
[mgr.node1]
    key = AQDBRFtmDXhxAhAAkHlWILzgxNFAmhNeACxFQg==

#node2
ceph auth get-or-create mgr.node2 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node2
sudo -u ceph vim /var/lib/ceph/mgr/ceph-node2/keyring
[mgr.node2]
    key = AQD4RFtmYQEUBBAAaBJJNmIzLLP9PLVl32brHg==

#node3
ceph auth get-or-create mgr.node3 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node3
sudo -u ceph vim /var/lib/ceph/mgr/ceph-node3/keyring
[mgr.node3]
    key = AQANRVtm79nTEhAA1UIQv7k/Xs6LfYOFw9zkJw==
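Instead of pasting the key by hand with vim, the output of ceph auth get-or-create can be written straight into the keyring file; a sketch for node1 (repeat with the matching names on node2 and node3):

ceph auth get-or-create mgr.node1 mon 'allow profile mgr' osd 'allow *' mds 'allow *' | sudo -u ceph tee /var/lib/ceph/mgr/ceph-node1/keyring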

# Start the ceph-mgr daemons:
#node1
systemctl restart ceph-mgr@node1
systemctl enable ceph-mgr@node1 --now
systemctl status ceph-mgr@node1

#node2
systemctl restart ceph-mgr@node2
systemctl enable ceph-mgr@node2 --now
systemctl status ceph-mgr@node2

#node3
systemctl restart ceph-mgr@node3
systemctl enable ceph-mgr@node3 --now
systemctl status ceph-mgr@node3

# Check the ceph status output to confirm the mgr daemons appear

# ceph status
  cluster:
    id:     8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
    health: HEALTH_WARN
            mons are allowing insecure global_id reclaim
            clock skew detected on mon.node2, mon.node3
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 3 daemons, quorum node1,node2,node3 (age 29s)
    mgr: node3(active, since 19s), standbys: node1, node2
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs: 

Add OSDs

# Copy the bootstrap-osd keyring to the other 2 nodes
#node1
scp /var/lib/ceph/bootstrap-osd/ceph.keyring root@node2:/var/lib/ceph/bootstrap-osd/
scp /var/lib/ceph/bootstrap-osd/ceph.keyring root@node3:/var/lib/ceph/bootstrap-osd/

# Create the OSDs
[root@node1 ~]# lsblk
NAME        MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda           8:0    0  100G  0 disk 
├─sda1        8:1    0    1G  0 part /boot
└─sda2        8:2    0   99G  0 part 
  ├─cs-root 253:0    0 61.2G  0 lvm  /
  ├─cs-swap 253:1    0  7.9G  0 lvm  
  └─cs-home 253:2    0 29.9G  0 lvm  /home
sdb           8:16   0   10G  0 disk 


# Run on all 3 nodes
yum install ceph-volume

ceph-volume lvm create --data /dev/sdb
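Optionally, the prepared OSD can be inspected on each node before starting the services (a quick check, not in the original):

ceph-volume lvm list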


# Start the OSD daemon on each node
#node1
systemctl restart ceph-osd@0
systemctl enable ceph-osd@0 --now
systemctl status ceph-osd@0

#node2
systemctl restart ceph-osd@1
systemctl enable ceph-osd@1 --now
systemctl status ceph-osd@1

#node3
systemctl restart ceph-osd@2
systemctl enable ceph-osd@2 --now
systemctl status ceph-osd@2 

# Check the cluster status
ceph -s
  cluster:
    id:     8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
    health: HEALTH_WARN
            mons are allowing insecure global_id reclaim

  services:
    mon: 3 daemons, quorum node1,node2,node3 (age 5m)
    mgr: node3(active, since 4m), standbys: node1, node2
    osd: 3 osds: 3 up (since 7s), 3 in (since 62s)

  data:
    pools:   1 pools, 1 pgs
    objects: 2 objects, 577 KiB
    usage:   18 MiB used, 30 GiB / 30 GiB avail
    pgs:     1 active+clean

  io:
    client:   1.2 KiB/s rd, 36 KiB/s wr, 1 op/s rd, 1 op/s wr
    recovery: 27 KiB/s, 0 objects/s

Add MDS

# Create the MDS data directories.
#node1
sudo -u ceph mkdir -p /var/lib/ceph/mds/ceph-node1
#node2
sudo -u ceph mkdir -p /var/lib/ceph/mds/ceph-node2
#node3
sudo -u ceph mkdir -p /var/lib/ceph/mds/ceph-node3


# Create the keyrings:
#node1
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-node1/keyring --gen-key -n mds.node1
#node2
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-node2/keyring --gen-key -n mds.node2
#node3
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-node3/keyring --gen-key -n mds.node3

# Import the keyrings and set ownership:
#node1
ceph auth add mds.node1 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-node1/keyring
chown ceph:ceph /var/lib/ceph/mds/ceph-node1/keyring

#node2
ceph auth add mds.node2 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-node2/keyring
chown ceph:ceph /var/lib/ceph/mds/ceph-node2/keyring

#node3
ceph auth add mds.node3 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-node3/keyring
chown ceph:ceph /var/lib/ceph/mds/ceph-node3/keyring

Finishing up

On all nodes, append the following to /etc/ceph/ceph.conf:

cat >> /etc/ceph/ceph.conf <<EOF
[mds.node1]
host = node1

[mds.node2]
host = node2

[mds.node3]
host = node3
EOF


Restart all services

#node1
systemctl restart ceph-mon@node1
systemctl restart ceph-mgr@node1
systemctl restart ceph-mds@node1
systemctl enable ceph-mds@node1
systemctl restart ceph-osd@0

#node2
systemctl restart ceph-mon@node2
systemctl restart ceph-mgr@node2
systemctl restart ceph-mds@node2
systemctl enable ceph-mds@node2
systemctl restart ceph-osd@1

#node3
systemctl restart ceph-mon@node3
systemctl restart ceph-mgr@node3
systemctl restart ceph-mds@node3
systemctl enable ceph-mds@node3
systemctl restart ceph-osd@2


Check the cluster status

ceph -s
  cluster:
    id:     8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
    health: HEALTH_WARN
            mons are allowing insecure global_id reclaim

  services:
    mon: 3 daemons, quorum node1,node2,node3 (age 9s)
    mgr: node3(active, since 4s), standbys: node1, node2
    osd: 3 osds: 3 up (since 4s), 3 in (since 2m)

  data:
    pools:   1 pools, 1 pgs
    objects: 2 objects, 577 KiB
    usage:   18 MiB used, 30 GiB / 30 GiB avail
    pgs:     1 active+clean


Check the OSD status

[root@node1 ~]# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
-1         0.02939  root default                             
-3         0.00980      host node1                           
 0    hdd  0.00980          osd.0       up   1.00000  1.00000
-5         0.00980      host node2                           
 1    hdd  0.00980          osd.1       up   1.00000  1.00000
-7         0.00980      host node3                           
 2    hdd  0.00980          osd.2       up   1.00000  1.00000
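The MDS daemons configured above stay in standby until a filesystem exists. If CephFS is needed, a minimal sketch (the pool names and PG counts here are illustrative assumptions, not from the original):

ceph osd pool create cephfs_data 32
ceph osd pool create cephfs_metadata 16
ceph fs new cephfs cephfs_metadata cephfs_data
ceph fs status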

II. OSD disk expansion

To give an OSD a larger disk, first remove the old OSD from the cluster and wipe the device, then re-create it (see the sketch after the commands below).

# Check the current OSD layout
ceph osd tree

# Remove osd.0 from the CRUSH map and stop its daemon
ceph osd crush remove osd.0
systemctl stop ceph-osd@0

# Purge osd.0 from the cluster (removes its CRUSH entry, auth key, and OSD id)
ceph osd purge 0 --yes-i-really-mean-it
for i in 0 ; do ceph osd rm $i; done

# Clean up the LVM volumes created by ceph-volume and wipe the disk
umount /var/lib/ceph/osd/*
lvremove /dev/ceph*/osd*
vgremove ceph*
pvremove /dev/sdb

# Verify the OSD is gone
ceph osd tree
ceph -s
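After the disk has been replaced or wiped, the OSD can be re-created exactly as in section I, using ceph-volume on the node that owns the new disk (/dev/sdb is the example device from above):

ceph-volume lvm create --data /dev/sdb
ceph osd tree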

III. Ceph snapshots

# List storage pools
[root@ceph-node1 ~]# ceph osd lspools
1 k8srbd1

# List images in the pool
[root@ceph-node1 ~]# rbd ls k8srbd1
rbda

# Show image details
[root@ceph-node1 ~]# rbd info k8srbd1/rbda
rbd image 'rbda':
    size 1 GiB in 256 objects
    order 22 (4 MiB objects)
    snapshot_count: 1
    id: ac8ab22c8504
    block_name_prefix: rbd_data.ac8ab22c8504
    format: 2
    features: layering, exclusive-lock
    op_features: 
    flags: 
    create_timestamp: Wed May 29 22:51:31 2024
    access_timestamp: Wed May 29 22:51:31 2024
    modify_timestamp: Wed May 29 22:51:31 2024

# Create a snapshot
[root@ceph-node1 ~]# rbd snap create  k8srbd1/rbda@2024-05-30

# List snapshots
[root@ceph-node1 ~]# rbd snap list k8srbd1/rbda
SNAPID NAME         SIZE  PROTECTED TIMESTAMP                
     4 2024-05-30-1 1 GiB           Thu May 30 18:32:25 2024 
     5 2024-05-30   1 GiB           Thu May 30 18:38:04 2024 


# Roll back to a snapshot
[root@ceph-node1 ~]# rbd snap rollback  k8srbd1/rbda@2024-05-30
Rolling back to snapshot: 100% complete...done.

# Protect a snapshot
rbd snap protect k8srbd1/rbda@2024-05-30
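Protection matters mainly for cloning: a snapshot must be protected before it can be cloned, and a protected snapshot cannot be deleted. A clone sketch (the child image name is an illustrative assumption):

rbd clone k8srbd1/rbda@2024-05-30 k8srbd1/rbda-clone
rbd ls k8srbd1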

# Unprotect a snapshot
rbd snap unprotect k8srbd1/rbda@2024-05-30

IV. Handling warnings

1. Ceph warning: 1 pool(s) do not have an application enabled

Symptom: ceph health reports HEALTH_WARN; ceph -s shows the following details:

[root@ceph12 ~]# ceph -s
  cluster:
    id:     50d4affa-9be6-4e55-9185-59602d63d844
    health: HEALTH_WARN
            1 pool(s) do not have an application enabled

  services:
    mon: 3 daemons, quorum ceph12,ceph13,ceph14 (age 3m)
    mgr: ceph12(active, since 2m), standbys: ceph13, ceph14
    osd: 3 osds: 3 up (since 3m), 3 in (since 104m)
    rgw: 3 daemons active (ceph12, ceph13, ceph14)

  task status:

  data:
    pools:   6 pools, 137 pgs
    objects: 188 objects, 5.9 KiB
    usage:   3.1 GiB used, 1.5 TiB / 1.5 TiB avail
    pgs:     137 active+clean

Fix: run ceph health detail for the specifics, which reports:

[root@ceph12 ~]# ceph health detail
HEALTH_WARN 1 pool(s) do not have an application enabled
[WRN] POOL_APP_NOT_ENABLED: 1 pool(s) do not have an application enabled
    application not enabled on pool 'test'
    use 'ceph osd pool application enable <pool-name> <app-name>', where <app-name> is 'cephfs', 'rbd', 'rgw', or freeform for custom applications.

Following the hint, enable the appropriate application on the pool; the cluster status then returns to normal:

[root@ceph12 ~]# ceph osd pool application enable test rgw
enabled application 'rgw' on pool 'test'
[root@ceph12 ~]# ceph health detail
HEALTH_OK

2. ceph -s shows "mon is allowing insecure global_id reclaim"

Ceph environment:

1 mon node, 3 OSD nodes (one of the OSD nodes also runs the mon)

Ceph version:

$ ceph-deploy --version
2.0.1

Ceph status:

$ ceph -s
  cluster:
    id:     37ac4cbb-a2c6-4f81-af1e-e9e39c010c85
    health: HEALTH_WARN
            mon is allowing insecure global_id reclaim

  services:
    mon: 1 daemons, quorum ceph-node-11 (age 64s)
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:   

Fix: disable the insecure mode.

$ ceph config set mon auth_allow_insecure_global_id_reclaim false
$ ceph -s
  cluster:
    id:     37ac4cbb-a2c6-4f81-af1e-e9e39c010c85
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum ceph-node-11 (age 90s)
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs: