Using CephFS storage on k8s

Preface

Kubernetes' Ceph RBD volume type does not support the ReadWriteMany (RWX) access mode. To keep the flexibility of mounting the same volume from multiple pods and nodes, this post uses CephFS instead, which supports concurrent mounts from many clients.

Installation

Ceph side

ceph-deploy mds create ceph

# Create the data and metadata pools for CephFS, then the filesystem
ceph osd pool create cephfs_data 8
ceph osd pool create cephfs_metadata 8
ceph fs new cephfs cephfs_metadata cephfs_data

Check:

[root@node1 ceph]# ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]

[root@node1 ceph]# ceph -s
  cluster:
    id:     cbc04385-1cdf-4512-a3f5-a5b3e8686a05
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum ceph
    mgr: ceph(active)
    mds: cephfs-1/1/1 up {0=ceph=up:active}
    osd: 1 osds: 1 up, 1 in

  data:
    pools:   3 pools, 106 pgs
    objects: 73 objects, 137MiB
    usage:   3.82GiB used, 16.2GiB / 20.0GiB avail
    pgs:     106 active+clean

Mount CephFS on the k8s node
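The mount command below reads the key from /etc/ceph/admin.secret. A minimal sketch of producing that file, assuming the client.admin key is reused as in this post:

# Dump the plain cephx key of client.admin into the secret file referenced by mount
ceph auth get-key client.admin > /etc/ceph/admin.secret
chmod 600 /etc/ceph/admin.secret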

[root@node1 ceph]# cat admin.secret 
AQA6EzBd5zAwIRAAgs+NyCjoAKuSrQKoFvQi9w==

[root@node1 ceph]# mkdir /mnt/cephfs

[root@node1 ceph]# mount -t ceph 192.168.6.101:6789:/ /mnt/cephfs -o name=admin,secretfile=/etc/ceph/admin.secret

[root@node1 ceph]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 2.0G 0 2.0G 0% /dev
tmpfs 2.0G 0 2.0G 0% /dev/shm
tmpfs 2.0G 198M 1.8G 10% /run
tmpfs 2.0G 0 2.0G 0% /sys/fs/cgroup
/dev/mapper/cl-root 36G 12G 24G 34% /
/dev/sda1 1014M 216M 799M 22% /boot
tmpfs 2.0G 12K 2.0G 1% /data/k8s/k8s/kubelet/pods/42d3eb88-aaba-11e9-b1ef-005056b1d2de/volumes/kubernetes.io~secret/coredns-token-qklgj
overlay 36G 12G 24G 34% /data/k8s/docker/data/overlay2/f651aa6ff6070b5a3acb8ba0e8d810a1d699a7853e0d3df802851fa64fc2c029/merged
shm 64M 0 64M 0% /data/k8s/docker/data/containers/531654eae411cc84ed7ed3c2d5e60afa2bd68cca57cf821636369be5869f6f7d/mounts/shm
tmpfs 2.0G 12K 2.0G 1% /data/k8s/k8s/kubelet/pods/4eb99bd2-aaba-11e9-b1ef-005056b1d2de/volumes/kubernetes.io~secret/default-token-npj5x
overlay 36G 12G 24G 34% /data/k8s/docker/data/overlay2/1707081196a127bbf0f46f57a9a6831c8f6d87a6f92dad5a578375d24eb41580/merged
shm 64M 0 64M 0% /data/k8s/docker/data/containers/b6b70ef43252b12414afc08562ceb4002467ff700acb01941dce73cf9e40d88a/mounts/shm
overlay 36G 12G 24G 34% /data/k8s/docker/data/overlay2/3ccf15f58beb54a2b59ab2c5c421ac3a8f3f2d5896b5b12430a8f672783eeb84/merged
tmpfs 396M 0 396M 0% /run/user/0
192.168.6.101:6789:/ 20G 3.9G 17G 20% /mnt/cephfs
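
To make this test mount persistent across reboots (not covered in the original post), the usual kernel-client fstab entry looks roughly like this:

# /etc/fstab sketch: mount CephFS at boot via the kernel client
192.168.6.101:6789:/  /mnt/cephfs  ceph  name=admin,secretfile=/etc/ceph/admin.secret,noatime,_netdev  0  0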

Create the Secret

[root@node1 ceph]# cat cephfs-secret.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
data:
  key: QVFBNkV6QmQ1ekF3SVJBQWdzK055Q2pvQUt1U3JRS29GdlFpOXc9PQ==
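
The key field is simply the base64 encoding of the cephx key shown in admin.secret above. A quick way to generate it and load the Secret (the echoed key reuses the value from this post):

# Base64-encode the cephx key for the Secret manifest
echo -n 'AQA6EzBd5zAwIRAAgs+NyCjoAKuSrQKoFvQi9w==' | base64
# -> QVFBNkV6QmQ1ekF3SVJBQWdzK055Q2pvQUt1U3JRS29GdlFpOXc9PQ==

kubectl apply -f cephfs-secret.yaml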

PV

[root@node1 ceph]# cat cephfs-pv.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cephfs-pv
  labels:
    pv: cephfs-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      - 192.168.6.101:6789
    user: admin
    secretRef:
      name: ceph-secret
    readOnly: false
  persistentVolumeReclaimPolicy: Delete
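
Create the PV and check it; until the claim below binds it, its STATUS should read Available:

kubectl apply -f cephfs-pv.yaml
kubectl get pv cephfs-pv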

PVC

[root@node1 ceph]# cat cephfs-pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  selector:
    matchLabels:
      pv: cephfs-pv
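
Create the claim, then mount it from a pod. The test pod below is a sketch (the name cephfs-test and the busybox image are illustrative, not from the original post); because the access mode is ReadWriteMany, the same PVC can be mounted by pods on several nodes at once.

kubectl apply -f cephfs-pvc.yaml
kubectl get pvc cephfs-pvc   # STATUS should become Bound once it matches cephfs-pv

# cephfs-test-pod.yaml (hypothetical file name)
apiVersion: v1
kind: Pod
metadata:
  name: cephfs-test
spec:
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: cephfs-pvc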