1 Create a Ceph storage pool

1.1 For the Ceph cluster setup itself, see: RookCeph安装 (Rook Ceph installation)

ceph osd pool create cephfs_data 128
ceph osd pool set-quota cephfs_data max_bytes $((20 * 1024 * 1024 * 1024)) # 20 GiB quota on the pool
ceph osd pool application enable cephfs_data cephfs
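
If the cluster does not yet have a CephFS filesystem (a Rook deployment may already provide one), a minimal sketch for creating one from these pools is shown below; the metadata pool name cephfs_metadata and the filesystem name testfs are assumptions chosen to match the output shown in section 3.1:

ceph osd pool create cephfs_metadata 64          # assumed metadata pool
ceph fs new testfs cephfs_metadata cephfs_data   # create the filesystem on the two pools
ceph osd pool get-quota cephfs_data              # confirm the 20 GiB quota set above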

1.2 Check the cluster status

ceph -s
  cluster:
    id:     xxxxxxxxx-xxxx-11ec-bfb9-67a0f564e0d6
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 7d)
    mgr: ceph01.pjvndt(active, since 7d), standbys: ceph02.injlkl, ceph03.sulrio
    mds: 1/1 daemons up, 2 standby
    osd: 8 osds: 8 up (since 7d), 8 in (since 7d)
    rgw: 3 daemons active (3 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   10 pools, 265 pgs
    objects: 269 objects, 8.3 KiB
    usage:   20 GiB used, 400 GiB / 420 GiB avail
    pgs:     265 active+clean

1.3 View the user key

ceph auth get client.admin
[client.admin]
key = AQBTyCpiHHhbARAAfHqI0X9iMd3rnzJQHaMLkw==
caps mds = "allow *"
caps mgr = "allow *"
caps mon = "allow *"
caps osd = "allow *"
exported keyring for client.admin
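
If only the key itself is needed (this is what the CSI Secret in section 2.4 uses), it can be printed directly:

ceph auth get-key client.admin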

Alternatively, create your own pool, user, and user key:

ceph osd pool create kubernetes
ceph osd pool application enable kubernetes cephfs
ceph auth get-or-create client.kubernetes mon 'allow r' mds 'allow rw' osd 'allow rw pool=kubernetes'
[client.kubernetes]
key = AQD9o0Fd6hEWSDAAAt7fMaSZXduT3NWEqylNpmg==

2 Ceph configuration in Kubernetes

2.1 Gather the cluster information for the ceph-csi Kubernetes ConfigMap

ceph mon dump
epoch 3
fsid xxxxxxxxx-xxxx-11ec-bfb9-67a0f564e0d6
last_changed 2022-03-11T03:57:44.615026+0000
created 2022-03-11T03:56:03.739853+0000
min_mon_release 16 (pacific)
election_strategy: 1
0: [v2:10.1.1.1:3300/0,v1:10.1.1.1:6789/0] mon.ceph01
1: [v2:10.1.1.2:3300/0,v1:10.1.1.2:6789/0] mon.ceph02
2: [v2:10.1.1.3:3300/0,v1:10.1.1.3:6789/0] mon.ceph03
dumped monmap epoch 3

2.2 Generate the ConfigMap from the information above

Download all the files in this directory.
You can also use the manifests from the official site, but you will need to change the image registry yourself.

cat csi-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: "ceph-csi-config"
  namespace: cephfs
data:
  config.json: |-
    [
      {
        "clusterID": "xxxxxxxxx-xxxx-11ec-bfb9-67a0f564e0d6",
        "monitors": [
          "10.1.1.1:6789",
          "10.1.1.2:6789",
          "10.1.1.3:6789"
        ]
      }
    ]

2.3 Change the kubelet path (optional)

In my case the kubelet data directory is /data/kubelet/.

If this path is wrong, the node plugin fails with a "driver not found" error.

grep kubelet *
csi-cephfsplugin.yaml: - "--kubelet-registration-path=/var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock"
csi-cephfsplugin.yaml: mountPath: /var/lib/kubelet/pods
csi-cephfsplugin.yaml: mountPath: /var/lib/kubelet/plugins
csi-cephfsplugin.yaml: path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/
csi-cephfsplugin.yaml: path: /var/lib/kubelet/plugins_registry/
csi-cephfsplugin.yaml: path: /var/lib/kubelet/pods
csi-cephfsplugin.yaml: path: /var/lib/kubelet/plugins
csi-cephfsplugin.yaml: path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/mountinfo
csi-nodeplugin-psp.yaml: - pathPrefix: '/var/lib/kubelet/pods'
csi-nodeplugin-psp.yaml: - pathPrefix: '/var/lib/kubelet/plugins/cephfs.csi.ceph.com'
csi-nodeplugin-psp.yaml: - pathPrefix: '/var/lib/kubelet/plugins_registry'
csi-nodeplugin-psp.yaml: - pathPrefix: '/var/lib/kubelet/plugins'

sed -i "s#/var/lib/kubelet#/data/kubelet#g" *

grep kubelet *
csi-cephfsplugin.yaml: - "--kubelet-registration-path=/data/kubelet/plugins/cephfs.csi.ceph.com/csi.sock"
csi-cephfsplugin.yaml: mountPath: /data/kubelet/pods
csi-cephfsplugin.yaml: mountPath: /data/kubelet/plugins
csi-cephfsplugin.yaml: path: /data/kubelet/plugins/cephfs.csi.ceph.com/
csi-cephfsplugin.yaml: path: /data/kubelet/plugins_registry/
csi-cephfsplugin.yaml: path: /data/kubelet/pods
csi-cephfsplugin.yaml: path: /data/kubelet/plugins
csi-cephfsplugin.yaml: path: /data/kubelet/plugins/cephfs.csi.ceph.com/mountinfo
csi-nodeplugin-psp.yaml: - pathPrefix: '/data/kubelet/pods'
csi-nodeplugin-psp.yaml: - pathPrefix: '/data/kubelet/plugins/cephfs.csi.ceph.com'
csi-nodeplugin-psp.yaml: - pathPrefix: '/data/kubelet/plugins_registry'
csi-nodeplugin-psp.yaml: - pathPrefix: '/data/kubelet/plugins'

2.4 Create the cephx Secret for ceph-csi

cat <<EOF > csi-cephfs-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: csi-cephfs-secret
  namespace: cephfs
stringData:
  userID: admin
  userKey: xxxxx3rnzJQHaMLkw==

  adminID: admin
  adminKey: xxxxx0X9iMd3rnzJQHaMLkw==
EOF
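
The same Secret could also be created directly from the command line instead of a manifest; a sketch, reusing the placeholder keys from above:

kubectl -n cephfs create secret generic csi-cephfs-secret \
  --from-literal=userID=admin \
  --from-literal=userKey='xxxxx3rnzJQHaMLkw==' \
  --from-literal=adminID=admin \
  --from-literal=adminKey='xxxxx0X9iMd3rnzJQHaMLkw=='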

2.5 Deploy the CSI driver

kubectl apply -f . -n cephfs
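
Before moving on, it is worth checking that the provisioner and node-plugin pods came up. The DaemonSet name below is the one used in the upstream ceph-csi manifests and is an assumption if the files were renamed:

kubectl get pods -n cephfs
kubectl get daemonset csi-cephfsplugin -n cephfs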

3 Using the CephFS filesystem

3.1 Look up the fsName

ceph fs ls
name: testfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]

3.2 Create the StorageClass

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephfs-sc
provisioner: cephfs.csi.ceph.com
parameters:
  clusterID: xxxxxxxxx-xxxx-11ec-bfb9-67a0f564e0d6
  pool: cephfs_data
  fsName: testfs
  csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/provisioner-secret-namespace: cephfs
  csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: cephfs
  csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/node-stage-secret-namespace: cephfs
reclaimPolicy: Delete
mountOptions:
  - discard
  • clusterID corresponds to the fsid from the earlier steps

  • the *-secret-name entries correspond to the name of the Secret created earlier

  • fsName corresponds to the name reported by ceph fs ls

  • imageFeatures selects the features of created RBD images; it only applies to the RBD StorageClass and is not used for CephFS

  • allowVolumeExpansion: true enables online expansion; it is not set in the manifest above (see the patch example after this list)

    kubectl apply -f sc.yaml -n cephfs
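
Because allowVolumeExpansion is not set, the output in the next step shows ALLOWVOLUMEEXPANSION false. If online expansion is wanted, one way to enable it afterwards is a sketch like this:

kubectl patch storageclass csi-cephfs-sc -p '{"allowVolumeExpansion": true}'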

3.3 Check the StorageClass

kubectl get storageclass
NAME            PROVISIONER           RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
csi-cephfs-sc   cephfs.csi.ceph.com   Delete          Immediate           false                  8m48s

3.4 Create a PVC

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-cephfs-pvc
  namespace: cephfs
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-cephfs-sc

kubectl apply -f pvc.yaml
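
Because CephFS is a shared filesystem, the same StorageClass can also back ReadWriteMany claims that are mounted by pods on several nodes at once. A minimal sketch; the claim name csi-cephfs-pvc-rwx is hypothetical:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-cephfs-pvc-rwx   # hypothetical name
  namespace: cephfs
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-cephfs-sc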

3.5 Check the PVC

kubectl get pvc -n cephfs
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS    AGE
csi-cephfs-pvc   Bound    pvc-42da0f11-baba-4b73-a1ea-bbaee1f5fad4   1Gi        RWO            csi-cephfs-sc   2s

3.6 Test writing from a pod mount

cat <<EOF > pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: csi-cephfs-demo-pod
  namespace: cephfs
spec:
  containers:
    - name: web-server
      image: nginx
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www/html
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: csi-cephfs-pvc
        readOnly: false
EOF

kubectl apply -f pod.yaml -n cephfs

kubectl exec -it csi-cephfs-demo-pod -n cephfs -- sh
echo "hello" > /var/lib/www/html/1
cat /var/lib/www/html/1

4 Static provisioning

4.1 Configure the Secret

Create it in the same namespace as the PVC.

apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
data:
  key: QVFCVHlDcGlISGhiQVJxxxxxxxx # the key obtained above must be base64-encoded; this matters
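
A minimal sketch of producing the base64 value from the admin key obtained in section 1.3 (the key shown is the placeholder from above):

echo -n 'AQBTyCpiHHhbARAAfHqI0X9iMd3rnzJQHaMLkw==' | base64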

4.2 Create the PV

apiVersion: v1
kind: PersistentVolume
metadata:
  name: cai-pv
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1Gi
  cephfs:
    monitors:
      - 10.1.1.1:6789   # ceph monitors
      - 10.1.1.2:6789
    path: /volumes/pvc-cephfs   # defaults to /; the directory is not created automatically. Create it by adding a subvolume or subvolumegroup in CephFS and using its path (see the sketch below)
    user: admin   # ceph user, provided by the storage backend
    secretRef:
      name: ceph-secret   # the ceph-secret created in section 4.1
    #secretFile: "/etc/ceph/cephfs-secret.yml" # instead of secretRef, a secret file on the node can be specified
  persistentVolumeReclaimPolicy: Retain
  volumeMode: Filesystem
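
The path above must already exist in CephFS. One way to create it is to create a subvolume and read back its path; in this sketch the group and subvolume names are hypothetical, and testfs is the filesystem name from section 3.1:

ceph fs subvolumegroup create testfs static-group
ceph fs subvolume create testfs pvc-cephfs --group_name static-group
ceph fs subvolume getpath testfs pvc-cephfs --group_name static-group   # use the returned path in the PV's path field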

4.3 Bind a PVC to the PV

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cai-pvc
spec:
  volumeName: cai-pv
  accessModes: ["ReadWriteOnce"]   # must match the PV above
  resources:
    requests:
      storage: 1Gi
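
If the cluster also has a default StorageClass, the claim above may get that class assigned and then never bind to cai-pv. In that case it helps to pin the claim to the static PV with an empty storageClassName, roughly as follows:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cai-pvc
spec:
  storageClassName: ""   # opt out of dynamic provisioning
  volumeName: cai-pv
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 1Gi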

4.4 Check the status

kubectl get pvc
NAME      STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
cai-pvc   Bound    cai-pv   1Gi        RWO                           63m

5 Troubleshooting

5.1 View the logs

kubectl logs -f deployment.apps/csi-cephfsplugin-provisioner -n cephfs --all-containers=true --max-log-requests=7
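
The node-side plugin logs are often needed as well, for example when a mount fails on a specific node; the DaemonSet name csi-cephfsplugin matches the upstream manifests and is an assumption here:

kubectl logs daemonset/csi-cephfsplugin -n cephfs --all-containers=true --max-log-requests=7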

5.2 Check the PVC status

kubectl describe pvc csi-cephfs-pvc -n cephfs

5.3 Test mounting by hand

mount -t ceph 10.1.1.1:6789,10.1.1.2:6789,10.1.1.3:6789:/volumes/pvc-cephfs /mnt -o name=admin,secret=xxxxxx9iMd3rnzJQHaMLkw==
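
The secret can also be read from a file on the node instead of being passed on the command line; a sketch, where the mount point and secret file path are assumptions:

mkdir -p /mnt/cephfs-test
echo 'xxxxxx9iMd3rnzJQHaMLkw==' > /etc/ceph/admin.secret
mount -t ceph 10.1.1.1:6789,10.1.1.2:6789,10.1.1.3:6789:/volumes/pvc-cephfs /mnt/cephfs-test -o name=admin,secretfile=/etc/ceph/admin.secret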