OpenStack Cluster Deployment: Integrating Ceph

Integrating Glance with Ceph

Configure glance-api.conf

# Modify glance-api.conf on each node running the glance-api service (all 3 controller nodes); controller01 is used as the example
# Only the sections involved in integrating glance with ceph are listed below
[root@controller01 ~]# vim /etc/glance/glance-api.conf
# Enable the copy-on-write feature
[DEFAULT]
show_image_direct_url = True

# Change the default store from local file storage to ceph rbd;
# note that the pool and user names must stay consistent with those created on the ceph side
[glance_store]
#stores = file,http
#default_store = file
#filesystem_store_datadir = /var/lib/glance/images/
stores = rbd
default_store = rbd
rbd_store_chunk_size = 8
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf

# After changing the configuration file, restart the service
[root@controller01 ~]# systemctl restart openstack-glance-api.service
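To confirm that glance is now writing to ceph, upload a test image and check the images pool. A minimal sketch, assuming a cirros image file is at hand (the file name is a placeholder) and the ceph admin keyring is available on the controller; raw format is preferred for rbd-backed images so they can be cloned copy-on-write:

# Convert the test image to raw, then upload it
[root@controller01 ~]# qemu-img convert -f qcow2 -O raw cirros-0.4.0-x86_64-disk.img cirros.raw
[root@controller01 ~]# openstack image create "cirros-raw" --file cirros.raw \
  --disk-format raw --container-format bare --public
# The image should now appear as an rbd image in the images pool
[root@controller01 ~]# rbd ls images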

Integrating Cinder with Ceph

Configure cinder.conf

# cinder uses a pluggable architecture and supports multiple storage backends at the same time;
# on the nodes running cinder-volume, simply configure the corresponding ceph rbd driver in cinder.conf;
[DEFAULT]
...
enabled_backends = ceph
glance_api_version = 2
...
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = 457eb676-33da-42ec-9a8c-9293d545c337

Start the service

systemctl start openstack-cinder-volume.service target.service

Verify

# Check the cinder service status; once cinder-volume is integrated with ceph, its state should be "up";
# alternatively: cinder service-list
[root@controller01 ~]# openstack volume service list
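Optionally, bind a volume type to the backend through volume_backend_name and create a small test volume to confirm it lands in the volumes pool. A sketch only; the type and volume names are arbitrary:

[root@controller01 ~]# openstack volume type create ceph
[root@controller01 ~]# openstack volume type set --property volume_backend_name=ceph ceph
[root@controller01 ~]# openstack volume create --type ceph --size 1 test-ceph-volume
# The volume should appear as an rbd image named volume-<id> in the volumes pool
[root@controller01 ~]# rbd ls volumes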

Integrating Nova with Ceph

Configure ceph.conf

# To boot virtual machines from ceph rbd, ceph must be configured as nova's ephemeral backend;
# enabling rbd cache in the compute node configuration is recommended;
# to ease troubleshooting, configure the admin socket parameter: each virtual machine using ceph rbd then gets its own socket, which helps with performance analysis and debugging;
# the changes only involve the [client] and [client.cinder] sections of ceph.conf on all compute nodes; compute01 is used as the example
[root@compute01 ~]# vim /etc/ceph/ceph.conf
[client]
rbd cache = true
rbd cache writethrough until flush = true
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/qemu/qemu-guest-$pid.log
rbd concurrent management ops = 20

[client.cinder]
keyring = /etc/ceph/ceph.client.cinder.keyring

# Create the socket and log directories specified in ceph.conf, and change their ownership
[root@compute01 ~]# mkdir -p /var/run/ceph/guests/ /var/log/qemu/
[root@compute01 ~]# chown qemu:libvirt /var/run/ceph/guests/ /var/log/qemu/
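Note that /var/run is normally a tmpfs, so the guests directory created above is lost after a reboot. One workaround (a sketch; the tmpfiles file name is arbitrary) is a systemd-tmpfiles entry that recreates it at boot:

# Recreate the admin socket directory on every boot, with the same owner as above
[root@compute01 ~]# cat > /etc/tmpfiles.d/qemu-ceph.conf << EOF
d /var/run/ceph/guests 0770 qemu libvirt -
EOF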

Configure nova.conf

# On all compute nodes, configure nova to use the vms pool of the ceph cluster as its backend; compute01 is used as the example
[root@compute01 ~]# vim /etc/nova/nova.conf
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
# the uuid must be consistent throughout (it refers to the libvirt secret created earlier)
rbd_secret_uuid = 10744136-583f-4a9c-ae30-9bfb3515526b
disk_cachemodes="network=writeback"
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
# Disable file injection
inject_password = false
inject_key = false
inject_partition = -2
# Enable discard for the VM ephemeral root disk; with "unmap", space on scsi-type disks is released immediately when freed
hw_disk_discard = unmap
# Existing setting
virt_type=kvm

# After changing the configuration file, restart the compute services
[root@compute01 ~]# systemctl restart libvirtd.service openstack-nova-compute.service
[root@compute01 ~]# systemctl status libvirtd.service openstack-nova-compute.service
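For reference, rbd_secret_uuid refers to a libvirt secret holding the client.cinder key, normally defined on every compute node earlier in the ceph integration. A sketch of how such a secret is typically created (the uuid and key must match your own environment):

[root@compute01 ~]# cat > secret.xml << EOF
<secret ephemeral='no' private='no'>
  <uuid>10744136-583f-4a9c-ae30-9bfb3515526b</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF
[root@compute01 ~]# virsh secret-define --file secret.xml
[root@compute01 ~]# virsh secret-set-value --secret 10744136-583f-4a9c-ae30-9bfb3515526b \
  --base64 $(ceph auth get-key client.cinder)
# The uuid listed here must match rbd_secret_uuid in nova.conf
[root@compute01 ~]# virsh secret-list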

Configure live migration

Modify /etc/libvirt/libvirtd.conf

# Perform on all compute nodes; compute01 is used as the example;
# the line numbers of the entries to modify in libvirtd.conf are shown below
[root@compute01 ~]# egrep -vn "^$|^#" /etc/libvirt/libvirtd.conf
# Uncomment the following three lines
22:listen_tls = 0
33:listen_tcp = 1
45:tcp_port = "16509"
# Uncomment and set the listen address
55:listen_addr = "<this node's IP address>"
# Uncomment, and disable authentication
158:auth_tcp = "none"
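Since every compute node needs the same edits with only listen_addr differing, they can be scripted. A sketch using sed, assuming the stock commented defaults shown below (verify them in your libvirtd.conf first, as they can differ between libvirt versions):

# LOCAL_IP is this node's management address; adjust before running
[root@compute01 ~]# LOCAL_IP=10.0.0.31
[root@compute01 ~]# sed -i \
  -e 's/^#listen_tls = 0/listen_tls = 0/' \
  -e 's/^#listen_tcp = 1/listen_tcp = 1/' \
  -e 's/^#tcp_port = "16509"/tcp_port = "16509"/' \
  -e "s/^#listen_addr = .*/listen_addr = \"${LOCAL_IP}\"/" \
  -e 's/^#auth_tcp = "sasl"/auth_tcp = "none"/' \
  /etc/libvirt/libvirtd.conf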

Modify /etc/sysconfig/libvirtd

# Perform on all compute nodes; compute01 is used as the example;
# the line number of the entry to modify in the libvirtd file is shown below
[root@compute01 ~]# egrep -vn "^$|^#" /etc/sysconfig/libvirtd
# Uncomment
9:LIBVIRTD_ARGS="--listen"

Restart services

# Both the libvirtd and nova-compute services need to be restarted
[root@compute01 ~]# systemctl restart libvirtd.service openstack-nova-compute.service

# Check that libvirtd is listening
[root@compute01 ~]# netstat -tunlp | grep 16509
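With libvirtd listening on tcp/16509 on all compute nodes, live migration can be tested. A sketch; the instance name and target host below are placeholders:

[root@controller01 ~]# nova live-migration test-instance compute02
# Confirm the instance has moved to the target host and is ACTIVE
[root@controller01 ~]# openstack server show test-instance -c OS-EXT-SRV-ATTR:host -c status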