OpenStack deployment (complete edition: all commands, no explanations, config files included)

Copyright notice: reposts must credit the original source and author: https://blog.csdn.net/weixin_44267608/article/details/89358798

Notes

Back up each configuration file before modifying it (the steps below do not include backups).
Keep track of every password you set. In this guide the RabbitMQ user openstack ends up with the password admin; it can be reset with:
rabbitmqctl change_password openstack admin

Environment:
controller: 192.168.1.100, three NICs, 4 GB RAM
compute node: 192.168.1.11, two NICs, 2 GB RAM
storage node: 192.168.1.12
An internal package mirror is used here; any repository that carries these packages will work.

Controller Node

yum install python-openstackclient -y
yum install openstack-selinux -y

yum install mariadb mariadb-server python2-PyMySQL -y

vim /etc/my.cnf.d/openstack.cnf

[mysqld]
bind-address = 192.168.1.100

default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

systemctl enable mariadb.service
systemctl start mariadb.service

mysql_secure_installation

yum install rabbitmq-server -y

systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service

rabbitmqctl add_user openstack RABBIT_PASS    # the configs below assume this password was later changed to "admin"
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
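
A quick check (not part of the original steps) that the user and its permissions took effect:

rabbitmqctl list_users                 # the openstack user should be listed
rabbitmqctl list_permissions -p /      # openstack should have ".*" ".*" ".*" on the / vhost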

yum install memcached python-memcached -y

vim /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 127.0.0.1,::1,controller"

systemctl enable memcached.service
systemctl start memcached.service
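
To confirm memcached is listening where expected (a sanity check, assuming the default port 11211):

ss -tlnp | grep 11211    # memcached should be bound to 127.0.0.1, ::1 and the controller address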

mysql -u root -p123
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost'
IDENTIFIED BY 'KEYSTONE_DBPASS';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%'
IDENTIFIED BY 'KEYSTONE_DBPASS';
exit;
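
Optionally verify the grants before moving on (a sketch using the KEYSTONE_DBPASS placeholder from above):

mysql -u keystone -pKEYSTONE_DBPASS -h controller -e 'SHOW DATABASES;'    # the keystone database should be listed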

yum install openstack-keystone httpd mod_wsgi -y

vim /etc/keystone/keystone.conf

[DEFAULT]

[assignment]

[auth]

[cache]

[catalog]

[cors]

[cors.subdomain]

[credential]

[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone

[domain_config]

[endpoint_filter]

[endpoint_policy]

[eventlet_server]

[federation]

[fernet_tokens]

[healthcheck]

[identity]

[identity_mapping]

[kvs]

[ldap]

[matchmaker_redis]

[memcache]

[oauth1]

[oslo_messaging_amqp]

[oslo_messaging_kafka]

[oslo_messaging_notifications]

[oslo_messaging_rabbit]

[oslo_messaging_zmq]

[oslo_middleware]

[oslo_policy]

[paste_deploy]

[policy]

[profiler]

[resource]

[revoke]

[role]

[saml]

[security_compliance]

[shadow_users]

[signing]

[token]
provider = fernet

[tokenless_auth]

[trust]

su -s /bin/sh -c "keystone-manage db_sync" keystone

keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

 keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
  --bootstrap-admin-url http://controller:35357/v3/ \
  --bootstrap-internal-url http://controller:5000/v3/ \
  --bootstrap-public-url http://controller:5000/v3/ \
  --bootstrap-region-id RegionOne

vim /etc/httpd/conf/httpd.conf    # add: ServerName controller

ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

systemctl enable httpd.service
systemctl start httpd.service

vim openrc

export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3

source openrc

openstack user list
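
If the admin credentials work, requesting a token should also succeed:

openstack token issue    # prints a token table if Keystone and the openrc credentials are correct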

openstack project create --domain default \
  --description "Service Project" service

openstack project create --domain default \
  --description "Demo Project" demo

openstack user create --domain default \
  --password=demo demo

openstack role create user

openstack role add --project demo --user demo user
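
To verify the demo account the same way, a demo-openrc sketch (the password comes from the --password=demo above):

cat > demo-openrc <<'EOF'
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_PROJECT_NAME=demo
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
EOF
source demo-openrc
openstack token issue    # should succeed as the demo user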

mysql -u root -p123

CREATE DATABASE glance;

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost'
IDENTIFIED BY 'GLANCE_DBPASS';

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%'
IDENTIFIED BY 'GLANCE_DBPASS';

exit;

openstack user create --domain default --password=glance glance

openstack role add --project service --user glance admin

openstack service create --name glance \
  --description "OpenStack Image" image

openstack endpoint create --region RegionOne \
  image public http://controller:9292

openstack endpoint create --region RegionOne \
  image internal http://controller:9292

openstack endpoint create --region RegionOne \
  image admin http://controller:9292

yum install openstack-glance -y

vim /etc/glance/glance-api.conf

[DEFAULT]

[cors]
[cors.subdomain]

[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance

[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

[image_format]

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance

[matchmaker_redis]

[oslo_concurrency]

[oslo_messaging_amqp]

[oslo_messaging_kafka]

[oslo_messaging_notifications]

[oslo_messaging_rabbit]

[oslo_messaging_zmq]

[oslo_middleware]

[oslo_policy]

[paste_deploy]
flavor = keystone

[profiler]

[store_type_location_strategy]

[task]

[taskflow_executor]

vim /etc/glance/glance-registry.conf

[DEFAULT]

[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance

[matchmaker_redis]

[oslo_messaging_amqp]

[oslo_messaging_kafka]

[oslo_messaging_notifications]

[oslo_messaging_rabbit]

[oslo_messaging_zmq]

[oslo_policy]

[paste_deploy]
flavor = keystone

[profiler]

su -s /bin/sh -c "glance-manage db_sync" glance

systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service

Upload an image (an image file must already be present):

openstack image create "cirros" \
  --file cirros-0.3.3-x86_64-disk.img \
  --disk-format qcow2 --container-format bare \
  --public
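
Confirm the upload:

openstack image list    # the cirros image should show status "active"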

mysql -u root -p123

CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost'
IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%'
IDENTIFIED BY 'NOVA_DBPASS';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost'
IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%'
IDENTIFIED BY 'NOVA_DBPASS';

GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost'
IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%'
IDENTIFIED BY 'NOVA_DBPASS';

exit

openstack user create --domain default --password=nova nova

openstack role add --project service --user nova admin

openstack service create --name nova \
  --description "OpenStack Compute" compute

openstack endpoint create --region RegionOne \
  compute public http://controller:8774/v2.1

openstack endpoint create --region RegionOne \
  compute internal http://controller:8774/v2.1

openstack endpoint create --region RegionOne \
  compute admin http://controller:8774/v2.1

openstack user create --domain default --password=placement placement

openstack role add --project service --user placement admin

openstack service create --name placement --description "Placement API" placement

openstack endpoint create --region RegionOne placement public http://controller:8778

openstack endpoint create --region RegionOne placement internal http://controller:8778

openstack endpoint create --region RegionOne placement admin http://controller:8778

yum -y install openstack-nova-api openstack-nova-conductor \
  openstack-nova-console openstack-nova-novncproxy \
  openstack-nova-scheduler openstack-nova-placement-api

vim /etc/nova/nova.conf

[DEFAULT]
my_ip=192.168.1.100
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
enabled_apis=osapi_compute,metadata
transport_url = rabbit://openstack:admin@controller

[api]
auth_strategy = keystone

[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api

[barbican]

[cache]

[cells]

[cinder]
os_region_name = RegionOne

[cloudpipe]

[conductor]

[console]

[consoleauth]

[cors]

[cors.subdomain]

[crypto]

[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova

[ephemeral_storage_encryption]

[filter_scheduler]

[glance]
api_servers = http://controller:9292

[guestfs]

[healthcheck]

[hyperv]

[image_file_url]

[ironic]

[key_manager]

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova

[libvirt]
#virt_type=qemu

[matchmaker_redis]

[metrics]

[mks]

[neutron]
#url = http://controller:9696
#auth_url = http://controller:35357
#auth_type = password
#project_domain_name = default
#user_domain_name = default
#region_name = RegionOne
#project_name = service
#username = neutron
#password = neutron
#service_metadata_proxy = true
#metadata_proxy_shared_secret = METADATA_SECRET

[notifications]

[osapi_v21]

[oslo_concurrency]
lock_path=/var/lib/nova/tmp

[oslo_messaging_amqp]

[oslo_messaging_kafka]

[oslo_messaging_notifications]

[oslo_messaging_rabbit]

[oslo_messaging_zmq]

[oslo_middleware]

[oslo_policy]

[pci]
[placement]
os_region_name = RegionOne
auth_type = password
auth_url = http://controller:35357/v3
project_name = service
project_domain_name = Default
username = placement
password = placement
user_domain_name = Default

[quota]

[rdp]

[remote_debug]

[scheduler]

[serial_console]

[service_user]

[spice]

[ssl]

[trusted_computing]

[upgrade_levels]

[vendordata_dynamic_auth]

[vmware]

[vnc]
enabled=true
vncserver_listen=$my_ip
vncserver_proxyclient_address=$my_ip
#novncproxy_base_url = http://192.168.1.100:6080/vnc_auto.html

[workarounds]

[wsgi]

[xenserver]

[xvp]

vim /etc/httpd/conf.d/00-nova-placement-api.conf    # append the following at the end

<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>

systemctl restart httpd
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova

nova-manage cell_v2 list_cells

systemctl enable openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service

systemctl start openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service
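
A quick status check on the controller services (not in the original steps; run with the admin openrc sourced):

openstack compute service list    # nova-consoleauth, nova-scheduler and nova-conductor should report "up"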

Compute Node

Resolve the dependencies:
wget http://download2.yunwei.edu/shell/openstack_app.tar.gz

tar -zxvf openstack_app.tar.gz

cd openstack-ocata
cd openstack-compute-yilai
yum -y localinstall ./*

yum install openstack-nova-compute -y

vim /etc/nova/nova.conf

[DEFAULT]
my_ip=192.168.1.11           # this node's first NIC IP
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
enabled_apis=osapi_compute,metadata
transport_url = rabbit://openstack:admin@controller   # points at the controller node

[api]
auth_strategy = keystone

[api_database]
#connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api

[barbican]

[cache]

[cells]

[cinder]
#os_region_name = RegionOne

[cloudpipe]

[conductor]

[console]

[consoleauth]

[cors]

[cors.subdomain]

[crypto]

[database]
#connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova

[ephemeral_storage_encryption]

[filter_scheduler]

[glance]
api_servers = http://controller:9292

[guestfs]

[healthcheck]

[hyperv]

[image_file_url]

[ironic]

[key_manager]

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova

[libvirt]
virt_type=qemu            # use QEMU so instances can run on this node

[matchmaker_redis]

[metrics]

[mks]

[neutron]
#url = http://controller:9696
#auth_url = http://controller:35357
#auth_type = password
#project_domain_name = default
#user_domain_name = default
#region_name = RegionOne
#project_name = service
#username = neutron
#password = neutron
#service_metadata_proxy = true
#metadata_proxy_shared_secret = METADATA_SECRET

[notifications]

[osapi_v21]

[oslo_concurrency]
lock_path=/var/lib/nova/tmp

[oslo_messaging_amqp]

[oslo_messaging_kafka]

[oslo_messaging_notifications]

[oslo_messaging_rabbit]

[oslo_messaging_zmq]

[oslo_middleware]

[oslo_policy]

[pci]
[placement]
os_region_name = RegionOne
auth_type = password
auth_url = http://controller:35357/v3
project_name = service
project_domain_name = Default
username = placement
password = placement
user_domain_name = Default

[quota]

[rdp]

[remote_debug]

[scheduler]

[serial_console]

[service_user]

[spice]

[ssl]

[trusted_computing]

[upgrade_levels]

[vendordata_dynamic_auth]

[vmware]

[vnc]
enabled=true
vncserver_listen=$my_ip
vncserver_proxyclient_address=$my_ip
novncproxy_base_url = http://192.168.1.100:6080/vnc_auto.html   # controller IP

[workarounds]

[wsgi]

[xenserver]

[xvp]

egrep -c '(vmx|svm)' /proc/cpuinfo    # check whether the CPU supports hardware virtualization

systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service

Back on the Controller Node

openstack hypervisor list

su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

nova-status upgrade check    # verify the deployment succeeded

mysql -u root -p123

CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost'
IDENTIFIED BY 'NEUTRON_DBPASS';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%'
IDENTIFIED BY 'NEUTRON_DBPASS';

exit;

openstack user create --domain default --password=neutron neutron

openstack role add --project service --user neutron admin

openstack service create --name neutron \
  --description "OpenStack Networking" network

openstack endpoint create --region RegionOne \
  network public http://controller:9696

openstack endpoint create --region RegionOne \
  network internal http://controller:9696

openstack endpoint create --region RegionOne \
  network admin http://controller:9696

yum -y install openstack-neutron openstack-neutron-ml2 \
  openvswitch openstack-neutron-openvswitch ebtables

vim /etc/neutron/neutron.conf

[DEFAULT]
state_path = /var/lib/neutron
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
dhcp_agent_notification = true
allow_overlapping_ips = True
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
transport_url = rabbit://openstack:admin@controller

[agent]

[cors]

[cors.subdomain]

[database]
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[matchmaker_redis]

[nova]
region_name = RegionOne
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
project_name = service
user_domain_name = default
username = nova
password = nova

[oslo_concurrency]
lock_path = $state_path/lock

[oslo_messaging_amqp]

[oslo_messaging_kafka]

[oslo_messaging_notifications]

[oslo_messaging_rabbit]

[oslo_messaging_zmq]

[oslo_middleware]

[oslo_policy]

[qos]

[quotas]

[ssl]

vim /etc/neutron/plugins/ml2/ml2_conf.ini

[DEFAULT]

[ml2]
type_drivers = flat,vxlan
tenant_network_types = vxlan
mechanism_drivers = openvswitch,l2population
extension_drivers = port_security

[ml2_type_flat]

[ml2_type_geneve]

[ml2_type_gre]

[ml2_type_vlan]

[ml2_type_vxlan]
vni_ranges = 1:1000

[securitygroup]
enable_ipset = true

vim /etc/neutron/plugins/ml2/openvswitch_agent.ini

[DEFAULT]

[agent]
tunnel_types = vxlan
l2_population = True

[ovs]
tunnel_bridge = br-tun
local_ip = 192.168.1.200
bridge_mappings =

[securitygroup]
firewall_driver = iptables_hybrid
enable_security_group = true

[xenapi]

vim /etc/neutron/l3_agent.ini

[DEFAULT]
interface_driver = openvswitch
external_network_bridge = br-ex

[agent]

[ovs]

vim /etc/neutron/dhcp_agent.ini

[DEFAULT]
interface_driver = openvswitch
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true

[agent]

[ovs]

vim /etc/neutron/metadata_agent.ini

[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = METADATA_SECRET

[agent]

[cache]

Now go back and uncomment the [neutron] section of /etc/nova/nova.conf:
vim /etc/nova/nova.conf

[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

systemctl restart openstack-nova-api.service

systemctl enable neutron-server.service neutron-dhcp-agent.service openvswitch neutron-openvswitch-agent neutron-metadata-agent.service

systemctl start neutron-server.service neutron-dhcp-agent.service openvswitch neutron-openvswitch-agent neutron-metadata-agent.service

ovs-vsctl add-br br-ex

Bind the bridge to the third NIC, ens35:
ovs-vsctl add-port br-ex ens35    # if added by mistake, remove the bridge with: ovs-vsctl del-br br-ex

Enable the routing (L3) agent:
systemctl enable neutron-l3-agent.service

systemctl start neutron-l3-agent.service
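
With everything started, verify the agents from the controller (admin openrc sourced); this check is not part of the original steps:

openstack network agent list    # the metadata, DHCP, L3 and Open vSwitch agents should all show alive ":-)"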

Compute Node

yum -y install openvswitch openstack-neutron-openvswitch ebtables ipset

vim /etc/neutron/neutron.conf

[DEFAULT]
#state_path = /var/lib/neutron
auth_strategy = keystone
#core_plugin = ml2
#service_plugins = router
#dhcp_agent_notification = true
#allow_overlapping_ips = True
#notify_nova_on_port_status_changes = true
#notify_nova_on_port_data_changes = true
transport_url = rabbit://openstack:admin@controller

[agent]

[cors]

[cors.subdomain]

[database]
#connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[matchmaker_redis]

[nova]
#region_name = RegionOne
#auth_url = http://controller:35357
#auth_type = password
#project_domain_name = default
#project_name = service
#user_domain_name = default
#username = nova
#password = nova

[oslo_concurrency]
lock_path = $state_path/lock

[oslo_messaging_amqp]

[oslo_messaging_kafka]

[oslo_messaging_notifications]

[oslo_messaging_rabbit]

[oslo_messaging_zmq]

[oslo_middleware]

[oslo_policy]

[qos]

[quotas]

[ssl]

vim /etc/neutron/plugins/ml2/openvswitch_agent.ini

[DEFAULT]

[agent]
tunnel_types = vxlan
l2_population = True

[ovs]
tunnel_bridge = br-tun
local_ip = 172.168.1.11            # second NIC IP
bridge_mappings =

[securitygroup]
firewall_driver = iptables_hybrid
enable_security_group = true

[xenapi]

systemctl enable openvswitch neutron-openvswitch-agent
systemctl start openvswitch neutron-openvswitch-agent
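
Back on the controller, the compute node's Open vSwitch agent should now appear as well:

openstack network agent list    # a second Open vSwitch agent, hosted on the compute node, should be listed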

Controller Node

yum install openstack-dashboard -y

vim /etc/openstack-dashboard/local_settings    # change the following settings

OPENSTACK_HOST = "controller"

SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'controller:11211',
    },
}

OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

OPENSTACK_API_VERSIONS = {
#    "data-processing": 1.1,
    "identity": 3,
    "image": 2,
    "volume": 2,
    "compute": 2,
}

OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"

OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

ALLOWED_HOSTS = ['horizon.example.com', 'localhost','*']

Then restart the related services:
systemctl restart httpd.service memcached.service

Then log in from a browser:
http://192.168.1.100/dashboard

Controller Node

mysql -uroot -p123
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost'
IDENTIFIED BY 'CINDER_DBPASS';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%'
IDENTIFIED BY 'CINDER_DBPASS';

exit

openstack user create --domain default --password=cinder cinder

openstack role add --project service --user cinder admin

openstack service create --name cinderv2 \
  --description "OpenStack Block Storage" volumev2

openstack service create --name cinderv3 \
  --description "OpenStack Block Storage" volumev3

openstack endpoint create --region RegionOne \
  volumev2 public http://controller:8776/v2/%\(project_id\)s

openstack endpoint create --region RegionOne \
  volumev2 internal http://controller:8776/v2/%\(project_id\)s

openstack endpoint create --region RegionOne \
  volumev2 admin http://controller:8776/v2/%\(project_id\)s

openstack endpoint create --region RegionOne \
  volumev3 public http://controller:8776/v3/%\(project_id\)s

openstack endpoint create --region RegionOne \
  volumev3 internal http://controller:8776/v3/%\(project_id\)s

openstack endpoint create --region RegionOne \
  volumev3 admin http://controller:8776/v3/%\(project_id\)s
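
To double-check the catalog entries (optional):

openstack endpoint list --service volumev2    # the three RegionOne endpoints created above should be listed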

Controller Node (Cinder API and scheduler; note my_ip below is the controller's address)

yum install openstack-cinder -y

vim /etc/cinder/cinder.conf

[DEFAULT]
my_ip = 192.168.1.100
#glance_api_servers = http://controller:9292
auth_strategy = keystone
#enabled_backends = lvm
transport_url = rabbit://openstack:admin@controller

[backend]

[barbican]

[brcd_fabric_example]

[cisco_fabric_example]

[coordination]

[cors]

[cors.subdomain]

[database]
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder

[fc-zone-manager]

[healthcheck]

[key_manager]

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder

[matchmaker_redis]

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

[oslo_messaging_amqp]

[oslo_messaging_kafka]

[oslo_messaging_notifications]

[oslo_messaging_rabbit]

[oslo_messaging_zmq]

[oslo_middleware]

[oslo_policy]

[oslo_reports]

[oslo_versionedobjects]

[profiler]

[ssl]

[lvm]
#volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
#volume_group = cinder-vg
#volumes_dir = $state_path/volumes
#iscsi_protocol = iscsi
#iscsi_helper = lioadm
#iscsi_ip_address = 172.16.254.63

su -s /bin/sh -c "cinder-manage db sync" cinder
The sync sometimes reports an error, but inspecting the database shows the tables were created successfully.

Edit the Compute Node

cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
vim /etc/nova/nova.conf

[cinder]
os_region_name = RegionOne

Restart the nova-api service on the controller node    # nova-api is what locates the cinder service
systemctl restart openstack-nova-api.service

Restart the compute service on the compute node:

systemctl restart openstack-nova-compute.service

Controller Node

systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service

Storage Node

yum install lvm2 -y

Next create a partition and set its type to LVM.
In fdisk: create the partition, then t → l to list type codes → enter 8e (the code can differ between versions) → w to save and exit

Create the PV:
pvcreate /dev/sdb1

Create the VG, named cinder-volumes:
vgcreate cinder-volumes /dev/sdb1
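
Verify the PV and VG (a sanity check):

pvs    # /dev/sdb1 should be listed
vgs    # cinder-volumes should appear with the expected size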

Edit the LVM configuration:
vim /etc/lvm/lvm.conf    # add the filter inside the devices section

devices {
...
        filter = [ "a/sdb1/", "r/.*/"]
}

Install the required packages:
yum -y install openstack-cinder targetcli python-keystone

Edit the config file:
cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
vim /etc/cinder/cinder.conf

[DEFAULT]
my_ip = 192.168.1.12
glance_api_servers = http://controller:9292
auth_strategy = keystone
enabled_backends = lvm
transport_url = rabbit://openstack:admin@controller

[backend]

[barbican]

[brcd_fabric_example]

[cisco_fabric_example]

[coordination]

[cors]

[cors.subdomain]

[database]
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder

[fc-zone-manager]

[healthcheck]

[key_manager]

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder

[matchmaker_redis]

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

[oslo_messaging_amqp]

[oslo_messaging_kafka]

[oslo_messaging_notifications]

[oslo_messaging_rabbit]

[oslo_messaging_zmq]

[oslo_middleware]

[oslo_policy]

[oslo_reports]

[oslo_versionedobjects]

[profiler]

[ssl]

[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
volumes_dir = $state_path/volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm
iscsi_ip_address = 192.168.1.12

systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service
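
Finally, from the controller (admin openrc sourced), confirm the volume service and try a test volume; the volume name here is arbitrary:

openstack volume service list               # cinder-scheduler and cinder-volume@lvm should report "up"
openstack volume create --size 1 testvol    # create a 1 GB test volume
openstack volume list                       # testvol should reach status "available"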
