#!/usr/bin/env bash
# Ceph (Rook) deployment helper.
set -euo pipefail

# 1. Load the container images required by the Rook/Ceph deployment.
#    All tarballs are expected under ./images/.
images=(
  ceph_master.tar
  ceph_v17.tar
  cephcsi_v3.6.1.tar
  csi-attacher_v3.4.0.tar
  csi-node-driver-registrar_v2.5.0.tar
  csi-provisioner_v3.1.0.tar
  csi-resizer_v1.4.0.tar
  csi-snapshotter_v5.0.1.tar
)
for img in "${images[@]}"; do
  docker load -i "images/${img}"
done
# 2. Deploy.
#    CRDs, common resources, and the operator must exist before the
#    CephCluster CR is created; the remaining resources layer on top.
kubectl create -f yamls/crds.yaml -f yamls/common.yaml -f yamls/operator.yaml
kubectl create -f yamls/cluster.yaml
kubectl apply -f yamls/dashboard-external-http.yaml
kubectl apply -f yamls/object-store.yaml
kubectl apply -f yamls/object-user.yaml
kubectl apply -f yamls/rgw-external.yaml
kubectl apply -f yamls/rook-ceph-svc.yaml
# 3. Teardown.
#    NOTE: if the OSDs were never created, restarting the operator re-runs
#    the OSD-provisioning jobs.
# Delete the CephCluster CRD resources first.
kubectl delete -f yamls/dashboard-external-http.yaml
kubectl delete -f yamls/object-store.yaml
kubectl delete -f yamls/object-user.yaml
kubectl delete -f yamls/rgw-external.yaml
kubectl delete -f yamls/rook-ceph-svc.yaml
# Authorize full data destruction (Rook refuses otherwise), then remove the
# cluster CR and confirm it is gone.
kubectl -n rook-ceph patch cephcluster my-cluster --type merge -p '{"spec":{"cleanupPolicy":{"confirmation":"yes-really-destroy-data"}}}'
kubectl -n rook-ceph delete cephcluster my-cluster
kubectl -n rook-ceph get cephcluster
# Delete the operator and its related resources.
# NOTE(review): the original used bare file names (operator.yaml, ...) here
# while the create step uses the yamls/ prefix; made consistent so this works
# from the same working directory.
kubectl delete -f yamls/operator.yaml
kubectl delete -f yamls/common.yaml
kubectl delete -f yamls/crds.yaml
# Wipe Ceph data left on the host.
# NOTE: the original line had a C-style `//` comment after the path, which
# shell would have passed to rm as a second path argument — fixed to `#`.
rm -rf /var/lib/rook
rm -rf /datarook/rook   # the path configured as dataDirHostPath
# Remove leftover ceph device-mapper mappings and device nodes.
# (glob loop instead of parsing `ls`; skips cleanly when nothing matches)
for dm in /dev/mapper/ceph-*; do
  [[ -e "$dm" ]] && dmsetup remove "$dm"
done
rm -rf /dev/ceph-*
rm -rf /dev/mapper/ceph--*
# Zap the OSD disk so it can be reused by a fresh cluster.
DISK="/dev/vdb"
dd if=/dev/zero of="$DISK" bs=512k count=1
wipefs -af "$DISK"
# Troubleshooting.
# The most common cleanup problem is the rook-ceph namespace or the cluster
# CRD hanging in "Terminating". A namespace cannot be deleted until all of
# its resources are gone, so first inspect what is still waiting:
kubectl -n rook-ceph get pod
kubectl -n rook-ceph get cephcluster
# Clear finalizers on every instance of each ceph.rook.io CRD so that stuck
# resources can finish deleting.
for CRD in $(kubectl get crd -n rook-ceph | awk '/ceph.rook.io/ {print $1}'); do
  kubectl get -n rook-ceph "$CRD" -o name | \
    xargs -I {} kubectl patch -n rook-ceph {} --type merge -p '{"metadata":{"finalizers": [null]}}'
done
# List every namespaced resource still present in rook-ceph.
kubectl api-resources --verbs=list --namespaced -o name \
  | xargs -n 1 kubectl get --show-kind --ignore-not-found -n rook-ceph
# The mon endpoints configmap and mon secret are frequent stragglers —
# clear their finalizers explicitly.
kubectl -n rook-ceph patch configmap rook-ceph-mon-endpoints --type merge -p '{"metadata":{"finalizers": [null]}}'
kubectl -n rook-ceph patch secrets rook-ceph-mon --type merge -p '{"metadata":{"finalizers": [null]}}'
# --- The following is for test configuration only ---

# Install s3cmd (non-interactive so it is usable from scripts).
apt-get install -y s3cmd

# Configure ~/.s3cfg.
# Fetch the RGW node address and the object-user credentials from the
# cluster secret. $( ) replaces legacy backticks; the single awk prints the
# first matching line's second field and exits (same result as the original
# grep | awk NR==1 | awk pipeline).
ip="172.26.197.248"
accessKey=$(kubectl -n rook-ceph get secret rook-ceph-object-user -o yaml | awk '/AccessKey/ {print $2; exit}' | base64 -d)
secretKey=$(kubectl -n rook-ceph get secret rook-ceph-object-user -o yaml | awk '/SecretKey/ {print $2; exit}' | base64 -d)
cat <<EOF > ~/.s3cfg
access_key = $accessKey
host_base = $ip:30906
host_bucket = $ip:30906/%(bucket)
secret_key = $secretKey
use_https = No
EOF
# 3. Create a bucket.
s3cmd mb s3://test

# 4. Upload the local "test" directory recursively (skip MIME sniffing).
s3cmd put -r test s3://test --no-mime-magic

# 5. Apply an access policy to the bucket.
# NOTE(review): "inital-data" looks like a typo for "initial-data" — path
# kept as-is; verify it against the actual directory name.
s3cmd setpolicy inital-data/acl.json s3://test
# Last updated: 2023/12/12, 09:29:58