Ceph Installation
#sudo yum install -y yum-utils && sudo yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/7/x86_64/ && sudo yum install --nogpgcheck -y epel-release && sudo rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 && sudo rm /etc/yum.repos.d/dl.fedoraproject.org*
# sudo yum install yum-plugin-priorities
Hostname sync: every node needs the same hostname-to-IP mapping in /etc/hosts (these are the addresses used throughout these notes):
cat /etc/hosts
192.168.118.137 ceph-node0
192.168.118.138 ceph-node1
192.168.118.139 ceph-node2
Firewall (disable on every node)
systemctl stop firewalld
systemctl disable firewalld
Time synchronization
yum install -y ntpdate
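Installing ntpdate by itself does not sync anything; a minimal sketch of a one-shot sync plus a cron refresh, assuming ntp1.aliyun.com as an example time server (any reachable NTP server works):
ntpdate ntp1.aliyun.com
(crontab -l 2>/dev/null; echo '*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1') | crontab -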
Install the EPEL repo and ceph-deploy
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
Install the Ceph release repo (luminous)
rpm -ivh https://mirrors.tuna.tsinghua.edu.cn/ceph/rpm-luminous/el7/noarch/ceph-release-1-1.el7.noarch.rpm
Point ceph.repo at the Tsinghua mirror
sed -i 's#htt.*://download.ceph.com#https://mirrors.tuna.tsinghua.edu.cn/ceph#g' /etc/yum.repos.d/ceph.repo
userdel ceph
adduser -d /home/ceph -m ceph
passwd ceph
1234567890
Grant passwordless sudo
echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph
chmod 0440 /etc/sudoers.d/ceph
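Some CentOS 7 builds enable requiretty in sudoers, which can make ceph-deploy's remote sudo calls fail; a hedged workaround, with /etc/sudoers.d/ceph-requiretty as an example filename:
echo 'Defaults:ceph !requiretty' | sudo tee /etc/sudoers.d/ceph-requiretty
sudo chmod 0440 /etc/sudoers.d/ceph-requiretty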
Passwordless SSH (distribute the key to every node)
ssh-keygen
ssh-copy-id ceph@ceph-node0
ssh-copy-id ceph@ceph-node1
ssh-copy-id ceph@ceph-node2
Verify the logins
ssh 'ceph@ceph-node0'
ssh 'ceph@ceph-node1'
ssh 'ceph@ceph-node2'
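So that ceph-deploy always connects as the ceph user without passing --username, the deploy user's ~/.ssh/config can pin the user per host; a sketch assuming the three hostnames above:
cat >> ~/.ssh/config <<EOF
Host ceph-node0
  User ceph
Host ceph-node1
  User ceph
Host ceph-node2
  User ceph
EOF
chmod 600 ~/.ssh/config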
Download the EPEL repo file
sudo wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
Install the Ceph release repo
sudo rpm -ivh https://mirrors.tuna.tsinghua.edu.cn/ceph/rpm-luminous/el7/noarch/ceph-release-1-1.el7.noarch.rpm
Switch ceph.repo to the Tsinghua mirror
sudo sed -i 's#htt.*://download.ceph.com#https://mirrors.tuna.tsinghua.edu.cn/ceph#g' /etc/yum.repos.d/ceph.repo
Install ceph-deploy
sudo yum install -y ceph-deploy
After installing, create a working directory and initialize the new cluster
mkdir ceph-install && cd ceph-install
ceph-deploy new ceph-node0 ceph-node1 ceph-node2
Install python2 pip (needed by ceph-deploy)
yum install python2-pip* -y
Install Ceph on all nodes
ceph-deploy install ceph-node0 ceph-node1 ceph-node2
Or run yum install -y ceph manually on each node.
Partition the OSD disk on each OSD node (non-interactive parted):
parted -s /dev/sdb mklabel gpt mkpart primary ext4 0% 50% mkpart primary ext4 50% 100%
// Firewall
systemctl stop firewalld
systemctl disable firewalld
Deploy the monitors and sync the configuration to every node
ceph-deploy --overwrite-conf mon create ceph{-node0,-node1,-node2}
ceph-deploy mon create-initial
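If create-initial succeeds, the working directory should hold the gathered keyrings, roughly the following (the exact set depends on the ceph-deploy version):
ls *.keyring
# ceph.client.admin.keyring  ceph.bootstrap-mds.keyring  ceph.bootstrap-mgr.keyring
# ceph.bootstrap-osd.keyring  ceph.bootstrap-rgw.keyring  ceph.mon.keyring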
Push the config and admin keyring to all nodes
ceph-deploy --overwrite-conf admin ceph-node0 ceph-node1 ceph-node2
Metadata server status
ceph mds stat
If yum warns that applydeltarpm is not installed:
yum provides '*/applydeltarpm'
sudo yum install deltarpm
Check the monitor status
ceph mon_status
fdisk -l
Check listening ports
netstat -lntp
Create the OSDs
ceph-deploy osd create ceph-node2 --data /dev/sdb --journal /dev/sdb1
ceph-deploy osd create ceph-node1 --data /dev/sdb --journal /dev/sdb1
ceph -w
The luminous release needs a running mgr daemon, otherwise ceph -s reports "no active mgr".
Deploy a mgr alongside every monitor
ceph-deploy mgr create ceph-node0:ceph-node0 ceph-node1:ceph-node1 ceph-node2:ceph-node2
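Optionally, the luminous mgr ships a dashboard module that can be enabled from any node holding the admin keyring; a sketch (the luminous dashboard listens on port 7000 by default):
ceph mgr module enable dashboard
ceph mgr services   # prints the dashboard URL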
Separate the public network from the cluster network (recommended, optional)
Edit ceph.conf as listed below (work in the ~/ceph-install directory; replace the fsid with your own and adjust the addresses to your own public/cluster networks)
[global]
# replace with your own fsid
fsid = dca70270-3292-4078-91c3-1fbefcd3bd62
mon_initial_members = ceph-node0, ceph-node1, ceph-node2
mon_host = 192.168.0.150,192.168.0.151,192.168.0.152
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public network = 192.168.0.0/24
cluster network = 172.16.1.0/24
[mon.a]
host = ceph-node0
mon addr = 192.168.0.150:6789
[mon.b]
host = ceph-node1
mon addr = 192.168.0.151:6789
[mon.c]
host = ceph-node2
mon addr = 192.168.0.152:6789
[osd]
osd data = /var/lib/ceph/osd/ceph-$id
osd journal size = 20000
osd mkfs type = xfs
osd mkfs options xfs = -f
filestore xattr use omap = true
filestore min sync interval = 10
filestore max sync interval = 15
filestore queue max ops = 25000
filestore queue max bytes = 10485760
filestore queue committing max ops = 5000
filestore queue committing max bytes = 10485760000
journal max write bytes = 1073714824
journal max write entries = 10000
journal queue max ops = 50000
journal queue max bytes = 10485760000
osd max write size = 512
osd client message size cap = 2147483648
osd deep scrub stride = 131072
osd op threads = 8
osd disk threads = 4
osd map cache size = 1024
osd map cache bl size = 128
osd mount options xfs = "rw,noexec,nodev,noatime,nodiratime,nobarrier"
osd recovery op priority = 4
osd recovery max active = 10
osd max backfills = 4
[client]
rbd cache = true
rbd cache size = 268435456
rbd cache max dirty = 134217728
rbd cache max dirty age = 5
Push the updated config
ceph-deploy --overwrite-conf admin ceph-node0 ceph-node1 ceph-node2
Restart the Ceph services on each node
systemctl restart ceph\*.service ceph\*.target
Check what is listening:
netstat -lntp
View the OSD tree
ceph osd tree
CephFS needs the MDS (metadata) service.
Create an MDS on every node for high availability.
ceph-deploy mds create ceph-node0 ceph-node1 ceph-node2
Storage pools:
ceph osd pool ls
Create a storage pool
ceph osd pool create data_data 32
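The 32 placement groups here are a rule-of-thumb value for a tiny test cluster; the commonly quoted sizing heuristic (an approximation, not a hard rule) is:
# total PGs ≈ (OSD count × 100) / replica count, rounded to a power of two, then split across pools
# e.g. 2 OSDs × 100 / 3 ≈ 67 → 64 in total → 32 per pool when two pools share the cluster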
List existing filesystems
ceph fs ls
Create a filesystem named data
[root@ceph-node0 ceph-install]# ceph osd pool create data_data 32
[root@ceph-node0 ceph-install]# ceph osd pool create data_metadata 32
[root@ceph-node0 ceph-install]# ceph fs new data data_metadata data_data
Mount the filesystem with the ceph-fuse client
[root@ceph-mon yum.repos.d]# yum install -y ceph-fuse
On the client, create a directory and mount the filesystem
// create the /data directory
[root@ceph-mon ~]# mkdir /data
// mount
[root@ceph-mon ~]# ceph-fuse -m 192.168.118.137,192.168.118.138,192.168.118.139:6789 /data
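An alternative to ceph-fuse is the kernel CephFS client; a hedged sketch, assuming the client.admin key is acceptable for a test mount:
mount -t ceph 192.168.118.137:6789:/ /data -o name=admin,secret=$(ceph auth get-key client.admin)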
Check PG status
ceph pg stat
20 pgs: 20 active+undersized+degraded; 14512 kB data, 302 GB used, 6388 GB / 6691 GB avail; 12/36 objects degraded (33.333%)
Check cluster health in detail
ceph health detail
Install the Ceph Object Gateway (RGW)
Syntax:
$ ceph-deploy install --rgw <gateway-node1> [<gateway-node2> ...]
Install it on ceph-node0:
ceph-deploy install --rgw ceph-node0
Give the RGW node admin access
The Ceph CLI on that node needs the admin keyring, so run:
$ ceph-deploy admin ceph-node0
Create the RGW instance
Run:
$ ceph-deploy rgw create ceph-node0
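ceph-deploy's RGW runs civetweb on port 7480 by default; a quick sanity check is to curl it and expect an anonymous ListAllMyBuckets XML response:
curl http://ceph-node0:7480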
Other OSD creation variants (ceph-deploy 2.x uses create --data; prepare/activate belongs to the older 1.x syntax):
[root@ceph-node0 ceph-install]# ceph-deploy osd create ceph-node1 --data /dev/sdb1 --journal /dev/sdb1
ceph-deploy osd create ceph-node1 --data /dev/sdb1 --journal /home/osd
ceph-deploy osd prepare ceph-node1:/home/osd ceph-node2:/home/osd
ceph-deploy osd activate ceph-node1:/home/osd ceph-node2:/home/osd
ceph-deploy osd activate node2:/var/local/osd0 node3:/var/local/osd1
[root@ceph-node0 ceph-install]# ceph-deploy osd create ceph-node2 --data /dev/sdb1 --journal /dev/sdb1
Set public_network on each monitor through its admin socket (config set takes a key and a value; the netmask must be a valid CIDR such as /24):
ceph --admin-daemon /var/run/ceph/ceph-mon.ceph-node0.asok config set public_network 192.168.118.0/24
ceph --admin-daemon /var/run/ceph/ceph-mon.ceph-node1.asok config set public_network 192.168.118.0/24
ceph --admin-daemon /var/run/ceph/ceph-mon.ceph-node2.asok config set public_network 192.168.118.0/24
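To confirm the value took effect, the same admin socket can read it back; a sketch for the first monitor:
ceph --admin-daemon /var/run/ceph/ceph-mon.ceph-node0.asok config get public_network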
Wipe the cluster and start over
ceph-deploy purge ceph-node0 ceph-node1 ceph-node2
ceph-deploy purgedata ceph-node0 ceph-node1 ceph-node2
ceph-deploy forgetkeys && rm ceph.*
Reference: ceph.repo contents installed by the release RPM (this sample points at the jewel repo; the luminous release RPM writes the rpm-luminous equivalent):
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://download.ceph.com/rpm-jewel/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 50% mkpart primary xfs 50% 100%
Disk partitioning and mounting
fdisk /dev/sdb   # interactive: n (new partition), p (primary), accept the defaults, then w to write
Check
fdisk -l
Create a filesystem
mke2fs -t ext4 /dev/sdb1   # create an ext4 filesystem
Make the kernel re-read the partition table
partprobe /dev/sdb
Build the ext4 filesystem
mkfs.ext4 /dev/sdb1
Mount (temporary)
mount /dev/sdb1 /opt
mount /dev/sdb1 /home/osd
// Permanent mount: add an entry to /etc/fstab following the existing lines
vim /etc/fstab
Look up the UUID with blkid:
/dev/sdb1: UUID="d9405a25-9f13-4548-bc4b-b19d62c69126" TYPE="ext4"
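A matching /etc/fstab entry, reusing the UUID reported above and the /home/osd mount point from earlier (adjust to your own device and mount point):
UUID=d9405a25-9f13-4548-bc4b-b19d62c69126 /home/osd ext4 defaults 0 0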
mount -a
Create an S3 user
[ceph@ceph-node0 ceph-install]$ radosgw-admin user create --uid="testuser" --display-name="First User"
{
"user_id": "testuser",
"display_name": "First User",
"email": "",
"suspended": 0,
"max_buckets": 1000,
"auid": 0,
"subusers": [],
"keys": [
{
"user": "testuser",
"access_key": "46C4IXBZO8NQMDXWNSNV",
"secret_key": "WCLvYcbLGwviyyMv9G60kMs1lu6GvVYVrLY9qfPE"
}
],
"swift_keys": [],
"caps": [],
"op_mask": "read, write, delete",
"default_placement": "",
"placement_tags": [],
"bucket_quota": {
"enabled": false,
"check_on_raw": false,
"max_size": -1,
"max_size_kb": 0,
"max_objects": -1
},
"user_quota": {
"enabled": false,
"check_on_raw": false,
"max_size": -1,
"max_size_kb": 0,
"max_objects": -1
},
"temp_url_keys": [],
"type": "rgw",
"mfa_ids": []
}
Restart the gateway
sudo systemctl restart ceph-radosgw.service
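A hedged end-to-end check with s3cmd, reusing the access/secret keys printed above; the tool choice, bucket name, and ~/.s3cfg values are examples (7480 is the default civetweb port):
yum install -y s3cmd
cat > ~/.s3cfg <<EOF
[default]
access_key = 46C4IXBZO8NQMDXWNSNV
secret_key = WCLvYcbLGwviyyMv9G60kMs1lu6GvVYVrLY9qfPE
host_base = ceph-node0:7480
host_bucket = ceph-node0:7480
use_https = False
EOF
s3cmd mb s3://test-bucket
s3cmd ls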
Errors
[ceph@ceph-node0 ceph-install]$ sudo ceph-deploy new ceph-node0 ceph-node1 ceph-node2
Traceback (most recent call last):
File "/bin/ceph-deploy", line 18, in <module>
from ceph_deploy.cli import main
File "/usr/lib/python2.7/site-packages/ceph_deploy/cli.py", line 1, in <module>
import pkg_resources
ImportError: No module named pkg_resources
Fix: install python-setuptools (pkg_resources is part of setuptools); the python2-pip install shown earlier normally pulls it in too.
[ceph_deploy.osd][DEBUG ] Host ceph-node2 is now ready for osd use.
[ceph@ceph-node0 ceph-install]$ ceph -s
2019-05-07 20:21:50.948 7fc7bbe71700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
2019-05-07 20:21:50.948 7fc7bbe71700 -1 monclient: ERROR: missing keyring, cannot use cephx for authentication
[errno 2] error connecting to the cluster
Solution: sudo chmod +r /etc/ceph/ceph.client.admin.keyring