Ceph Object Storage Cluster Deployment

Cluster architecture

192.168.10.186   ceph1   admin, mon, mgr, osd, rgw
192.168.10.187   ceph2   mon, mgr, osd, rgw
192.168.10.188   ceph3   mon, mgr, osd, rgw

Deployment

# Add host entries (on all three nodes)
[root@10dot186 ~]# vim /etc/hosts
192.168.10.186 ceph1
192.168.10.187 ceph2
192.168.10.188 ceph3

# Set the hostname (run the matching command on each node)
hostnamectl set-hostname ceph1
hostnamectl set-hostname ceph2
hostnamectl set-hostname ceph3

# Sync the clock on every node
ntpdate ntp1.aliyun.com

# On ceph1, generate an SSH key and distribute it for passwordless login
ssh-keygen
ssh-copy-id ceph1
ssh-copy-id ceph2
ssh-copy-id ceph3


[root@ceph1 ~]# vim /etc/yum.repos.d/ceph.repo
[ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/$basearch
enabled=1
gpgcheck=1
priority=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc

[ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch
enabled=1
gpgcheck=1
priority=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
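
The /etc/hosts entries above, and this repo file if the other nodes should also install from the Aliyun mirror, need to exist on ceph2 and ceph3 as well. A minimal sketch of pushing both from ceph1, assuming the passwordless SSH set up above:

# Copy the hosts entries and the Ceph repo file to the other nodes
for host in ceph2 ceph3; do
    scp /etc/hosts $host:/etc/hosts
    scp /etc/yum.repos.d/ceph.repo $host:/etc/yum.repos.d/ceph.repo
done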




# Install EPEL and ceph-deploy on the admin node (ceph1)
yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
yum makecache
yum update -y
yum install -y ceph-deploy




# Work from /etc/ceph and generate the initial cluster configuration and monitor keyring
mkdir /etc/ceph && cd /etc/ceph
ceph-deploy new ceph1 ceph2 ceph3

# ceph-deploy needs python-setuptools
yum install -y python-setuptools
Add the following to the configuration file (/etc/ceph/ceph.conf):
osd_pool_default_size = 3
[mgr]
mgr modules = dashboard
[mon]
mon allow pool delete = true
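
For reference, after ceph-deploy new plus the additions above, /etc/ceph/ceph.conf on this cluster should look roughly like the sketch below. The fsid and monitor addresses are taken from the ceph -s output and hosts file in this article; the exact auto-generated fields may differ on your system.

[global]
fsid = fcb2fa5e-481a-4494-9a27-374048f37113
mon_initial_members = ceph1, ceph2, ceph3
mon_host = 192.168.10.186,192.168.10.187,192.168.10.188
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd_pool_default_size = 3

[mgr]
mgr modules = dashboard

[mon]
mon allow pool delete = true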

mon

# Install Ceph packages on all three nodes, then create and initialize the monitors
ceph-deploy install ceph1 ceph2 ceph3

ceph-deploy mon create-initial



[root@ceph1 ceph]# ceph -s
  cluster:
    id:     fcb2fa5e-481a-4494-9a27-374048f37113
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0B
    usage:   0B used, 0B / 0B avail
    pgs:
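
If you want to double-check that all three monitors formed a quorum (not shown in the original output), the standard quorum status command can be used:

# Confirm the monitor quorum members
ceph quorum_status --format json-pretty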

mgr

# Deploy a manager daemon on each node
ceph-deploy mgr create ceph1 ceph2 ceph3


[root@ceph1 ceph]# ceph -s
  cluster:
    id:     fcb2fa5e-481a-4494-9a27-374048f37113
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3
    mgr: ceph1(active), standbys: ceph2, ceph3
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0B
    usage:   0B used, 0B / 0B avail
    pgs:



[root@ceph1 ceph]# ceph mgr dump
{
    "epoch": 4,
    "active_gid": 4122,
    "active_name": "ceph1",
    "active_addr": "192.168.10.186:6800/22316",
    "available": true,
    "standbys": [
        {
            "gid": 4129,
            "name": "ceph2",
            "available_modules": [
                "balancer",
                "dashboard",
                "influx",
                "localpool",
                "prometheus",
                "restful",
                "selftest",
                "status",
                "zabbix"
            ]
        },
        {
            "gid": 4132,
            "name": "ceph3",
            "available_modules": [
                "balancer",
                "dashboard",
                "influx",
                "localpool",
                "prometheus",
                "restful",
                "selftest",
                "status",
                "zabbix"
            ]
        }
    ],
    "modules": [
        "balancer",
        "restful",
        "status"
    ],
    "available_modules": [
        "balancer",
        "dashboard",
        "influx",
        "localpool",
        "prometheus",
        "restful",
        "selftest",
        "status",
        "zabbix"
    ],
    "services": {}
}
[root@ceph1 ceph]# ceph mgr module enable dashboard
[root@ceph1 ceph]# ceph mgr dump
{
    "epoch": 7,
    "active_gid": 4139,
    "active_name": "ceph1",
    "active_addr": "192.168.10.186:6800/22316",
    "available": true,
    "standbys": [
        {
            "gid": 4136,
            "name": "ceph3",
            "available_modules": [
                "balancer",
                "dashboard",
                "influx",
                "localpool",
                "prometheus",
                "restful",
                "selftest",
                "status",
                "zabbix"
            ]
        },
        {
            "gid": 4141,
            "name": "ceph2",
            "available_modules": [
                "balancer",
                "dashboard",
                "influx",
                "localpool",
                "prometheus",
                "restful",
                "selftest",
                "status",
                "zabbix"
            ]
        }
    ],
    "modules": [
        "balancer",
        "dashboard",
        "restful",
        "status"
    ],
    "available_modules": [
        "balancer",
        "dashboard",
        "influx",
        "localpool",
        "prometheus",
        "restful",
        "selftest",
        "status",
        "zabbix"
    ],
    "services": {}
}





[root@ceph1 ceph]# ceph config-key put mgr/dashboard/server_addr 192.168.6.101
set mgr/dashboard/server_addr
[root@ceph1 ceph]# ceph config-key put mgr/dashboard/server_port 7000
set mgr/dashboard/server_port
[root@ceph1 ~]# netstat -tulnp |grep 7000
tcp 0 0 192.168.6.101:7000 0.0.0.0:* LISTEN 19836/ceph-mgr
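
A quick way to confirm the dashboard is actually serving on the configured address (this check is not part of the original output):

# Expect an HTTP 200 from the dashboard landing page
curl -I http://192.168.6.101:7000/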

At this point, take a look at the dashboard:

osd

Create a logical volume on each node:
[root@ceph1 ceph]# pvcreate /dev/sdb
Physical volume "/dev/sdb" successfully created.

[root@ceph1 ceph]# vgcreate data_vg1 /dev/sdb
Volume group "data_vg1" successfully created

[root@ceph1 ceph]# lvcreate -n data_lv1 -L 99g data_vg1
Logical volume "data_lv1" created.
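
The same volume group and logical volume have to exist on ceph2 and ceph3. A minimal sketch of doing that from ceph1 over the passwordless SSH set up earlier, assuming each node also has its data disk at /dev/sdb:

# Repeat the LVM setup on the other two nodes
for host in ceph2 ceph3; do
    ssh $host "pvcreate /dev/sdb && vgcreate data_vg1 /dev/sdb && lvcreate -n data_lv1 -L 99g data_vg1"
done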




# Create one OSD per node on the logical volume just prepared
ceph-deploy osd create ceph1 --data data_vg1/data_lv1
ceph-deploy osd create ceph2 --data data_vg1/data_lv1
ceph-deploy osd create ceph3 --data data_vg1/data_lv1

[root@ceph1 ceph]# ceph -s
  cluster:
    id:     fcb2fa5e-481a-4494-9a27-374048f37113
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3
    mgr: ceph1(active), standbys: ceph3, ceph2
    osd: 3 osds: 3 up, 3 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0B
    usage:   3.01GiB used, 294GiB / 297GiB avail
    pgs:
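
To see how the three OSDs map onto the hosts (not shown above), the standard CRUSH tree listing can be used:

# List OSDs and their placement per host
ceph osd tree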

At this point, take a look at the dashboard:

RGW cluster

# Install the RGW packages, push the admin keyring and config, and create a gateway on each node
ceph-deploy install --rgw ceph1 ceph2 ceph3

ceph-deploy admin ceph1 ceph2 ceph3

ceph-deploy rgw create ceph1 ceph2 ceph3

[root@ceph1 ceph]# ceph -s
  cluster:
    id:     fcb2fa5e-481a-4494-9a27-374048f37113
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3
    mgr: ceph1(active), standbys: ceph3, ceph2
    osd: 3 osds: 3 up, 3 in
    rgw: 3 daemons active

  data:
    pools:   4 pools, 32 pgs
    objects: 191 objects, 3.08KiB
    usage:   3.01GiB used, 294GiB / 297GiB avail
    pgs:     32 active+clean
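
Each RGW listens on port 7480 by default, so a quick anonymous request (not part of the original output) should return an empty ListAllMyBuckets XML document from every node:

# Every gateway should answer on its default port 7480
for host in ceph1 ceph2 ceph3; do
    curl -s http://$host:7480/
    echo
done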

At this point, take a look at the dashboard:

NGINX proxy

Installing NGINX itself is not covered here.

[root@ceph1 conf.d]# cat cephcloud.dev.goago.cn.conf
upstream cephcloud.dev.goago.cn {
    server 192.168.10.186:7480;
    server 192.168.10.187:7480;
    server 192.168.10.188:7480;
}
server {
    listen      80;
    server_name cephcloud.dev.goago.cn;
    location / {
        proxy_intercept_errors on;
        access_log /var/log/nginx/cephcloud_log;
        proxy_pass http://cephcloud.dev.goago.cn;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Request_Uri $request_uri;
    }
}
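
After dropping this file into conf.d, the usual nginx workflow applies; the commands below are standard nginx/curl usage rather than part of the original article:

# Validate the configuration and reload nginx
nginx -t && nginx -s reload

# The proxy should return the same anonymous bucket listing as the gateways themselves
curl -s -H 'Host: cephcloud.dev.goago.cn' http://127.0.0.1/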

S3 and Swift

The detailed installation is not described here; see my previous article.


New settings:
Access Key: M954JYYAOBES65B7UNEZ
Secret Key: 11MZu3N9vB4S4C4N8U2Ywgkhxro3Xi6K9HPyRQ9v
Default Region: US
S3 Endpoint: cephcloud.dev.goago.cn
DNS-style bucket+hostname:port template for accessing a bucket: %(bucket)s.cephcloud.dev.goago.cn
Encryption password: 123456
Path to GPG program: /usr/bin/gpg
Use HTTPS protocol: False
HTTP Proxy server name:
HTTP Proxy server port: 0
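
With that configuration in place, basic s3cmd operations against the gateway look like the sketch below; the bucket and file names are made-up examples.

# Create a bucket, upload a file, and list the bucket (names are illustrative)
s3cmd mb s3://test-bucket
s3cmd put /etc/hosts s3://test-bucket/hosts
s3cmd ls s3://test-bucket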