OpenStack Cluster Deployment: Cinder Control Node Cluster

Cinder Architecture

When a user or Nova compute sends a request to create a volume, the request is first received by Cinder API, which publishes it to the message queue for Cinder Scheduler. When Cinder Scheduler picks the message up, it queries the database for the current state of the storage nodes, selects the best volume service node for the volume according to the configured policy, and publishes the scheduling result for the volume service to consume. On receiving the result, the volume service looks up its volume providers and creates the volume on the selected storage node, then returns the outcome to the user and writes the updated state back to the database.
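To see this flow end to end once the full deployment (including the storage nodes covered in later chapters) is in place, a single volume creation exercises the whole chain: cinder-api accepts the request and returns immediately, then the scheduler and volume service finish the work asynchronously. The volume name demo-vol is just an illustration:

[root@controller01 ~]# . admin-openrc
# Request a 1 GiB volume; the API call returns before the volume is built
[root@controller01 ~]# openstack volume create --size 1 demo-vol
# Poll the status; it moves from "creating" to "available" once cinder-volume finishes
[root@controller01 ~]# openstack volume show demo-vol -c status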

Deployment

Create the cinder database

# Create the database on any control node; the data replicates automatically across the cluster. controller01 is used as the example;
[root@controller01 ~]# mysql -uroot -pmysql_pass

MariaDB [(none)]> CREATE DATABASE cinder;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '123456';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '123456';

MariaDB [(none)]> flush privileges;
MariaDB [(none)]> exit;
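As a quick sanity check, and to confirm the new grants have replicated across the Galera cluster from the earlier chapters, the cinder account can be tested from another controller (assuming the 123456 password above):

# On controller02: log in as the cinder user against the local replica
[root@controller02 ~]# mysql -ucinder -p123456 -e "SHOW DATABASES;" | grep cinder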

Create cinder-api

# Run on any control node; controller01 is used as the example;
# Calling the cinder service requires credentials, so load the environment variable script
[root@controller01 ~]# . admin-openrc
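An optional sanity check that the loaded credentials authenticate correctly is to request a token:

[root@controller01 ~]# openstack token issue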

Create the cinder user

# The service project was already created in the glance chapter;
# The cinder user lives in the "default" domain
[root@controller01 ~]# openstack user create --domain default --password=123456 cinder

Grant the cinder user the admin role

# Grant the cinder user the admin role
[root@controller01 ~]# openstack role add --project service --user cinder admin
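Both steps can be verified in one command by listing the new user's role assignments:

[root@controller01 ~]# openstack role assignment list --user cinder --project service --names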

Create the cinder service entities

# The cinder service entity types are "volumev2" and "volumev3";
# Create both the v2 and v3 service entities
[root@controller01 ~]# openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
[root@controller01 ~]# openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
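Both entities should now appear in the service catalog:

[root@controller01 ~]# openstack service list | grep volume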

Create the cinder-api endpoints

# Note that --region must match the region generated when the admin user was initialized;
# The API address uses the VIP throughout; if public/internal/admin use separate VIPs, distinguish them accordingly;
# The cinder-api service types are volumev2 and volumev3;
# The cinder-api URL suffix is the project id, viewable with "openstack project list"
# v2 public api
[root@controller01 ~]# openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s

# v2 internal api
[root@controller01 ~]# openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s

# v2 admin api
[root@controller01 ~]# openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
# v3 public api
[root@controller01 ~]# openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s

# v3 internal api
[root@controller01 ~]# openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s

# v3 admin api
[root@controller01 ~]# openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
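Each service should now expose three interfaces (public, internal, admin), which can be confirmed with:

[root@controller01 ~]# openstack endpoint list --service volumev2
[root@controller01 ~]# openstack endpoint list --service volumev3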

Install cinder

# Install the cinder service on all control nodes; controller01 is used as the example
[root@controller01 ~]# yum install openstack-cinder -y
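A quick check that the package and its service units are present (the version string depends on your release):

[root@controller01 ~]# rpm -q openstack-cinder
[root@controller01 ~]# systemctl list-unit-files | grep openstack-cinder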

Configure cinder.conf

# Run on all control nodes; controller01 is used as the example;
# Note the "my_ip" parameter; change it per node;
# Note the ownership of cinder.conf: root:cinder
[root@controller01 ~]# cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
[root@controller01 ~]# egrep -v "^$|^#" /etc/cinder/cinder.conf
[DEFAULT]
state_path = /var/lib/cinder
my_ip = 192.168.182.131
glance_api_servers = http://controller:9292
auth_strategy = keystone
osapi_volume_listen = $my_ip
osapi_volume_listen_port = 8776
log_dir = /var/log/cinder
# When fronted by haproxy, services can hit connection timeouts and reconnect to rabbitmq; check the individual service logs and the rabbitmq logs for this;
# transport_url = rabbit://openstack:rabbitmq_pass@controller:5673
# rabbitmq has its own clustering, and the official docs recommend connecting to the rabbitmq cluster directly; services occasionally fail to start this way for unknown reasons, but if you do not see that, connecting straight to the cluster rather than going through the haproxy front end is strongly recommended
transport_url = rabbit://openstack:rabbitmq_pass@controller01:5672,controller02:5672
[backend]
[backend_defaults]
[barbican]
[brcd_fabric_example]
[cisco_fabric_example]
[coordination]
[cors]
[database]
connection = mysql+pymysql://cinder:123456@controller/cinder
[fc-zone-manager]
[healthcheck]
[key_manager]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller01:11211,controller02:11211
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = 123456
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = $state_path/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[profiler]
[service_user]
[ssl]
[vault]
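Rather than hand-editing the file on every node, the settings can be scripted with openstack-config (a crudini wrapper shipped in the openstack-utils package). A minimal sketch for the one per-node value, assuming for illustration that controller02's address is 192.168.182.132:

# On controller02, after copying cinder.conf over from controller01
[root@controller02 ~]# yum install openstack-utils -y
# 192.168.182.132 is an assumed address; substitute the node's real IP
[root@controller02 ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip 192.168.182.132
[root@controller02 ~]# chown root:cinder /etc/cinder/cinder.conf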

Configure nova.conf

# Run on all control nodes; controller01 is used as the example;
# The change only touches the "[cinder]" section of nova.conf;
# Set the matching region
[root@controller01 ~]# vim /etc/nova/nova.conf
[cinder]
os_region_name=RegionOne
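The same edit can be applied non-interactively on each node, e.g. with openstack-config (assuming openstack-utils is installed as above):

[root@controller01 ~]# openstack-config --set /etc/nova/nova.conf cinder os_region_name RegionOne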

Sync the cinder database

# Run on any control node;
# Ignore any "deprecation" messages
[root@controller01 ~]# su -s /bin/sh -c "cinder-manage db sync" cinder
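To confirm the sync populated the schema (the exact table set varies by release):

[root@controller01 ~]# mysql -ucinder -p123456 -e "USE cinder; SHOW TABLES;" | head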

Start the services

# Run on all control nodes;
# nova.conf changed, so restart the nova service first
[root@controller01 ~]# systemctl restart openstack-nova-api.service


# Enable at boot
[root@controller01 ~]# systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service

# Start
[root@controller01 ~]# systemctl restart openstack-cinder-api.service
[root@controller01 ~]# systemctl restart openstack-cinder-scheduler.service
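To confirm the API came up and is listening on port 8776 (per osapi_volume_listen_port above):

[root@controller01 ~]# systemctl status openstack-cinder-api.service openstack-cinder-scheduler.service
[root@controller01 ~]# ss -tnlp | grep 8776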

Verify

[root@controller01 ~]# . admin-openrc 

# Check the agent services;
# Alternatively: cinder service-list
[root@controller01 ~]# openstack volume service list
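A healthy control plane shows one cinder-scheduler entry per controller, each with state "up". The API itself can also be probed directly; its root URL returns the supported versions as JSON (controller being the VIP, per the endpoint setup):

[root@controller01 ~]# curl http://controller:8776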