# Ceph Cluster Setup on Linux

## Building the Ceph Cluster

### Virtualization Support

When Ceph is used as backend storage for QEMU/KVM, Xen, VMware, LXC, VirtualBox, etc., or for cloud platforms such as OpenStack, CloudStack, and OpenNebula, install the virtualization tools first: qemu and libvirt.

#### QEMU

**RHEL family**

```bash
$ yum install qemu-kvm qemu-kvm-tools qemu-img

# Packages actually installed in practice
$ yum install qemu qemu-img
```

#### libvirt

**RHEL family**

```bash
yum install libvirt
```

### Node Configuration

Every Ceph cluster needs at least one monitor node and one or more OSD nodes.

#### Preparation

1. Cluster name and node names
2. Monitor name (usually the short hostname)
3. SSH keys

#### Creating a Monitor

```bash
# Log in to the monitor node.
# Create the configuration directory, normally /etc/ceph,
# and the ceph.conf configuration file.
$ vim /etc/ceph/ceph.conf
[global]

# Generate a unique ID and add it to the configuration file
$ uuidgen
[global]
fsid = {UUID}

# Add the monitor host(s) to the configuration file
mon initial members = {hostname}[,{hostname}]

# Add the monitor IP(s) to the configuration file
mon host = {ip-address}[,{ip-address}]

# Create a keyring and generate the monitor key
$ ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'

# Create the administrator keyring and add the client.admin user
$ ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'

# Create the OSD bootstrap keyring and add the client.bootstrap-osd user
$ ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'

# Import the generated keys into the monitor keyring
$ ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
$ ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring

###################################
# Actual run
$ ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
creating /tmp/ceph.mon.keyring
$ ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
creating /etc/ceph/ceph.client.admin.keyring
$ ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'
creating /var/lib/ceph/bootstrap-osd/ceph.keyring
$ ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
importing contents of /etc/ceph/ceph.client.admin.keyring into /tmp/ceph.mon.keyring
$ ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
importing contents of /var/lib/ceph/bootstrap-osd/ceph.keyring into /tmp/ceph.mon.keyring
###################################

# Change ownership of the monitor keyring
$ chown ceph:ceph /tmp/ceph.mon.keyring

# Create the initial monitor map: hostname, IP, fsid
$ monmaptool --create --add {hostname} {ip-address} --fsid {uuid} /tmp/monmap

##########################
# Actual run
$ monmaptool --create --add master 172.89.0.13 --fsid a1bbf649-8aca-46ff-b31a-677612a67336 /tmp/monmap
monmaptool: monmap file /tmp/monmap
monmaptool: set fsid to a1bbf649-8aca-46ff-b31a-677612a67336
monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
##########################

# Create the monitor data directory
$ mkdir /var/lib/ceph/mon/{cluster-name}-{hostname}

# Populate the monitor daemon's data store
$ ceph-mon [--cluster {cluster-name}] --mkfs -i {hostname} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
```
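For reference, a minimal sketch of the last two steps with the placeholders filled in, using the hostname (`master`) and the default cluster name (`ceph`) from the run above; running them as the `ceph` user keeps the ownership of the data directory correct. The concrete paths are assumptions based on those defaults.

```bash
# Sketch: monitor data directory and store for host "master" in cluster "ceph"
# (values taken from the hands-on output above; adjust for your own nodes).
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-master

# Populate the monitor store from the monmap and keyring created earlier
sudo -u ceph ceph-mon --cluster ceph --mkfs -i master \
    --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring

# The store files should now exist under the data directory
ls /var/lib/ceph/mon/ceph-master
```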
With the monitor store in place, fill in /etc/ceph/ceph.conf with the cluster-wide settings, then start the monitor:

```bash
# Configure the ceph.conf file
[global]
fsid = {cluster-id}
mon initial members = {hostname}[, {hostname}]
mon host = {ip-address}[, {ip-address}]
public network = {network}[, {network}]
cluster network = {network}[, {network}]
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = {n}
osd pool default size = {n}        # Write an object n times.
osd pool default min size = {n}    # Allow writing n copies in a degraded state.
osd pool default pg num = {n}
osd pool default pgp num = {n}
osd crush chooseleaf type = {n}

# Start the monitor
$ systemctl start ceph-mon@{hostname}

# Check the cluster status
$ ceph -s
```
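To make the placeholders concrete, here is a sketch of a single-monitor ceph.conf built from the fsid, hostname, and address used in the hands-on run above; the network range, replication sizes, and PG counts are illustrative assumptions rather than values from this setup.

```bash
# Sketch: minimal single-monitor ceph.conf using the values from the run above.
# The network, pool sizes, and PG counts are assumptions -- adjust them to
# your own environment.
cat > /etc/ceph/ceph.conf <<'EOF'
[global]
fsid = a1bbf649-8aca-46ff-b31a-677612a67336
mon initial members = master
mon host = 172.89.0.13
public network = 172.89.0.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 128
osd pool default pgp num = 128
osd crush chooseleaf type = 1
EOF

# Enable and start the monitor, then confirm the cluster responds
systemctl enable --now ceph-mon@master
ceph -s
```

`ceph -s` should show the monitor in quorum; health will typically remain in a warning state until OSDs are added.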