Руководство по установке и запуску OpenStack в ALT Linux p8
Инструкция по мотивам установки на Redhat: https://docs.openstack.org/newton/install-guide-rdo/
Инструкция в разработке.
Минимальные требования к оборудованию
- Процессорных ядер - одно;
- Оперативная память от 4Gb;
- Диск 20 Гб.
* На машине с 2Gb RAM - сталкивался с нехваткой памяти и падением процессов.
Пример установки с сетевым модулем на управляющем узле (controller)
- Сетевые интерфейсы *** !!!! переделать с другой структурой сети
- ens19 -
- ens20 -
Установка управляющего узла
Добавляем на узле в /etc/hosts (не удаляйте хост 127.0.0.1)
# Управляющий узел 10.0.0.11 controller # Вычислительный узел 10.0.0.31 compute1
Подготовка к установке
# apt-get update -y # apt-get dist-upgrade -y
- Удаление firewalld
apt-get remove firewalld
Установка ПО
# apt-get install openstack-nova chrony python-module-memcached python3-module-memcached python-module-pymemcache python3-module-pymemcache mariadb-server python-module-MySQLdb python-module-openstackclient openstack-glance python-module-glance python-module-glance_store python-module-glanceclient openstack-nova-api openstack-nova-cells openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-scheduler rabbitmq-server openstack-keystone apache2-mod_wsgi memcached openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge openstack-neutron-l3-agent openstack-neutron-dhcp-agent openstack-neutron-server openstack-neutron-metadata-agent
настройка времени
в /etc/chrony.conf добавляем
allow 10.0.0.0/24
Если имеется настроенный свой NTP, заменяем "pool.ntp.org" на свой.
pool pool.ntp.org iburst
#systemctl enable chronyd.service Synchronizing state of chronyd.service with SysV service script with /lib/systemd/systemd-sysv-install. Executing: /lib/systemd/systemd-sysv-install enable chronyd #systemctl start chronyd.service
настройка sql сервера
Комментируем строку "skip-networking" в /etc/my.cnf.d/server.cnf
# cat > /etc/my.cnf.d/openstack.cnf [mysqld] bind-address = 10.0.0.11 default-storage-engine = innodb innodb_file_per_table max_connections = 4096 collation-server = utf8_general_ci character-set-server = utf8
# systemctl enable mariadb # systemctl start mariadb
задаем пароль администратора sql сервера root и удаляем тестовые таблички
- пароль по умолчанию пустой "" (после ввода нового пароля, на все вопросы отвечать утвердительно)
# mysql_secure_installation
настройка сервера сообщений rabbitmq
# systemctl enable rabbitmq.service # systemctl start rabbitmq.service
Добавляем пользователя:
#rabbitmqctl add_user openstack RABBIT_PASS #rabbitmqctl set_permissions openstack ".*" ".*" ".*"
Настройка memcached
в файле /etc/sysconfig/memcached заменяем строчку LISTEN="127.0.0.1" на
LISTEN="10.0.0.11"
# systemctl enable memcached # systemctl start memcached
Настройка Keystone
Создаём базу данных и пользователя с паролем.
# mysql -u root -p > CREATE DATABASE keystone; > GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS'; > GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';
Сохраняем оригинальный конфигурационный файл.
# mv /etc/keystone/keystone.conf /etc/keystone/keystone.conf.orig
# cat > /etc/keystone/keystone.conf [DEFAULT] [assignment] [auth] [cache] [catalog] [cors] [cors.subdomain] [credential] [database] connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone [domain_config] [endpoint_filter] [endpoint_policy] [eventlet_server] [eventlet_server_ssl] [federation] [fernet_tokens] [identity] [identity_mapping] [kvs] [ldap] [matchmaker_redis] [memcache] [oauth1] [os_inherit] [oslo_messaging_amqp] [oslo_messaging_notifications] [oslo_messaging_rabbit] [oslo_middleware] [oslo_policy] [paste_deploy] [policy] [resource] [revoke] [role] [saml] [shadow_users] [signing] [ssl] [token] provider = fernet [tokenless_auth] [trust]
Заполняем базу данных keystone
# su -s /bin/sh -c "keystone-manage db_sync" keystone
# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone # keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
Пароль пользователя admin - ADMIN_PASS
# keystone-manage bootstrap --bootstrap-password ADMIN_PASS \ --bootstrap-admin-url http://controller:35357/v3/ \ --bootstrap-internal-url http://controller:35357/v3/ \ --bootstrap-public-url http://controller:5000/v3/ \ --bootstrap-region-id RegionOne
настраиваем apache2 для keystone
у нас apache2 собран без mod_version, поэтому убираем в файле /etc/httpd2/conf/sites-available/openstack-keystone.conf все строчки
<IfVersion >= 2.4> </IfVersion>
добавляем в активную конфигурацию keystone
# a2ensite openstack-keystone
Добавляем servername в конфигурацию.
echo ServerName controller >/etc/httpd2/conf/sites-enabled/servername.conf
systemctl enable httpd2.service systemctl start httpd2.service
Создание доменов, пользователей и ролей
Для дальнейших работ рекомендуется создать пользователя.
# adduser admin # su - admin
cat >auth export OS_USERNAME=admin export OS_PASSWORD=ADMIN_PASS export OS_PROJECT_NAME=admin export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_DOMAIN_NAME=Default export OS_AUTH_URL=http://controller:35357/v3 export OS_IDENTITY_API_VERSION=3
Создаём пользователя demo
# su - admin . auth openstack project create --domain default --description "Service Project" service
Укажите пароль для пользователя demo
openstack project create --domain default --description "Demo Project" demo openstack user create --domain default --password-prompt demo openstack role create user openstack role add --project demo --user demo user
Проверка настроек узла управления
# su - admin $ . auth unset OS_AUTH_URL OS_PASSWORD
пароль "ADMIN_PASS"
openstack --os-auth-url http://controller:35357/v3 \ --os-project-domain-name Default --os-user-domain-name Default \ --os-project-name admin --os-username admin token issue
должно вывести что-то вроде:
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | expires | 2017-05-16T15:08:43.854293Z | | id | gAAAAABZGwfr4_2NvksY-XnVTayUxh0zZEi4vp7Ff4JmdPqbQQy-W3NG2rs6EzImkevuVbvx4RkCtIWwhaxpbsEUoIFhfwaBwRpqE3fmx7d6OruRucHvFEjmtCKpBPHe9htK0s9hm40n7WmaADaYgi9LgnMto6YRNEBG5mzBJhX0b4NoHgeRA0 | | project_id | d22531fa71e849078c44bb1f00117d87 | | user_id | 7be0608abb9641c5bd8d9f7a3bf519cb | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
проверка пользователя demo:
openstack --os-auth-url http://controller:5000/v3 \ --os-project-domain-name Default --os-user-domain-name Default \ --os-project-name demo --os-username demo token issue
+------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Field | Value | +------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | expires | 2017-05-16T15:10:40.979623Z | | id | gAAAAABZGwhhpQ5BvHvPmM9w6zuXstXZ6JMJDwkbV0zXUBsKLJuJ69CJKux0VoHzxaCKkEuaiOMtIWn2G0u__54HCMQQTvj7f8ddLezXgnlek9KLOPk9FEuoORIg9cahtgqttHgKyLuMKysHzuy331wxrcY-TtsOWWn_yhBJt7NWHtaTN7GEqNg | | project_id | 19493a015aaf4e5f9983b58b460b3794 | | user_id | 9173af4437f34acd86f5a3d4516c53b6 | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
Настройка окружения
su - admin rm auth
cat > admin-openrc export OS_PROJECT_DOMAIN_NAME=Default export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_NAME=admin export OS_USERNAME=admin export OS_PASSWORD=ADMIN_PASS export OS_AUTH_URL=http://controller:35357/v3 export OS_IDENTITY_API_VERSION=3 export OS_IMAGE_API_VERSION=2
cat > demo-openrc export OS_PROJECT_DOMAIN_NAME=Default export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_NAME=demo export OS_USERNAME=demo export OS_PASSWORD=DEMO_PASS export OS_AUTH_URL=http://controller:5000/v3 export OS_IDENTITY_API_VERSION=3 export OS_IMAGE_API_VERSION=2
проверка окружения
su - admin . admin-openrc openstack token issue
Должно выдать что-то вроде такого:
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | expires | 2017-05-16T15:48:13.101936Z | | id | gAAAAABZGxEtWlJ0eEGve9Y1VvIRk-wQtZN128A92YPFb5iuTJuo2O7G6Gd9IYdnyPZP6xAXDmT2VzIVbuhvOKQi9bItygi2fWRTw7byAZZdKIvR3mAHpsZyLPpS61hM2ydQLsf6g57xhMKy5y1Fw4Z3uXPabK27dZi1aTslIQZB4RA4Q9WZYWM | | project_id | d22531fa71e849078c44bb1f00117d87 | | user_id | 7be0608abb9641c5bd8d9f7a3bf519cb | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
Настройка сервиса glance
mysql -u root -p CREATE DATABASE glance; GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \ IDENTIFIED BY 'GLANCE_DBPASS'; GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \ IDENTIFIED BY 'GLANCE_DBPASS'; su - admin . admin-openrc
Задаем пароль сервису glance
openstack user create --domain default --password-prompt glance openstack role add --project service --user glance admin openstack service create --name glance --description "OpenStack Image" image openstack endpoint create --region RegionOne image public http://controller:9292 openstack endpoint create --region RegionOne image internal http://controller:9292 openstack endpoint create --region RegionOne image admin http://controller:9292
настраиваем конфиг:
cd /etc/glance/ mv glance-api.conf glance-api.conf_orig cat >glance-api.conf [DEFAULT] use_syslog = true [cors] [cors.subdomain] [database] connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance [glance_store] stores = file,http default_store = file filesystem_store_datadir = /var/lib/glance/images/ [image_format] [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = glance password = GLANCE_PASS [matchmaker_redis] [oslo_concurrency] [oslo_messaging_amqp] [oslo_messaging_notifications] [oslo_messaging_rabbit] [oslo_policy] [paste_deploy] flavor = keystone [profiler] [store_type_location_strategy] [task] [taskflow_executor]
mv /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.orig cat > /etc/glance/glance-registry.conf [DEFAULT] [database] connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance [glance_store] [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = glance password = GLANCE_PASS [matchmaker_redis] [oslo_messaging_amqp] [oslo_messaging_notifications] [oslo_messaging_rabbit] [oslo_policy] [paste_deploy] flavor = keystone [profiler]
проверка
su - admin . admin-openrc wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.img \ --disk-format qcow2 --container-format bare --public
# openstack image list +--------------------------------------+--------+--------+ | ID | Name | Status | +--------------------------------------+--------+--------+ | f1008c6a-f86a-4c48-8332-2573321e4be1 | cirros | active | +--------------------------------------+--------+--------+
Установка вычислительного узла
начальная подготовка управляющего узла
Создание БД.
mysql -u root -p CREATE DATABASE nova_api; CREATE DATABASE nova; GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS'; GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS'; GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS'; GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
Создаём пользователя nova и указываем пароль, который потом будет использоваться при настройке.
openstack user create --domain default --password-prompt nova
создаём роль
Создаём сервис nova
openstack service create --name nova --description "OpenStack Compute" compute
создаём API endpoint
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1/%\(tenant_id\)s openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1/%\(tenant_id\)s openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1/%\(tenant_id\)s
Настройка nova
cd /etc/nova/ mv nova.conf nova.conf.orig cat >nova.conf [DEFAULT] log_dir = /var/log/nova state_path = /var/lib/nova connection_type = libvirt compute_driver = libvirt.LibvirtDriver image_service = nova.image.glance.GlanceImageService volume_api_class = nova.volume.cinder.API auth_strategy = keystone network_api_class = nova.network.neutronv2.api.API service_neutron_metadata_proxy = True security_group_api = neutron injected_network_template = /usr/share/nova/interfaces.template enabled_apis = osapi_compute,metadata transport_url = rabbit://openstack:RABBIT_PASS@controller auth_strategy = keystone my_ip = 10.0.0.11 use_neutron = True firewall_driver = nova.virt.firewall.NoopFirewallDriver web=/usr/share/spice-html5 [api_database] connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api [barbican] [cache] [cells] [cinder] [conductor] [cors] [cors.subdomain] [database] connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova [ephemeral_storage_encryption] [glance] api_servers = http://controller:9292 [guestfs] [hyperv] [image_file_url] [ironic] [keymgr] [keystone_authtoken] signing_dir = /var/cache/nova/keystone-signing admin_tenant_name = %SERVICE_TENANT_NAME% admin_user = nova admin_password = %SERVICE_PASSWORD% identity_uri = http://localhost:35357 auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = nova password = NOVA_PASS [libvirt] [matchmaker_redis] [metrics] [neutron] url = http://controller:9696 auth_url = http://controller:35357 auth_type = password project_domain_name = Default user_domain_name = Default region_name = RegionOne project_name = service username = neutron password = NEUTRON_PASS service_metadata_proxy = True metadata_proxy_shared_secret = [osapi_v21] [oslo_concurrency] lock_path = /var/run/nova [oslo_messaging_amqp] [oslo_messaging_notifications] 
[oslo_messaging_rabbit] [oslo_middleware] [oslo_policy] [rdp] [serial_console] [spice] spicehtml5proxy_host = :: html5proxy_base_url = http://controller:6082/spice_auto.html enabled = True keymap = en-us enabled = true [ssl] [trusted_computing] [upgrade_levels] [vmware] [vnc] enabled = false [workarounds] [xenserver]
заполнение БД nova
su -s /bin/sh -c "nova-manage api_db sync" nova su -s /bin/sh -c "nova-manage db sync" nova
Запуск nova сервиса
# systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service # systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
Установка вычислительной ноды
Ставим пакеты
apt-get update apt-get install openstack-nova-compute apt-get dist-upgrade
Поменяйте ip 10.0.0.xxx на ip своей вычислительной ноды
cd /etc/nova mv nova.conf nova.conf.orig cat >nova.conf [DEFAULT] log_dir = /var/log/nova state_path = /var/lib/nova connection_type = libvirt compute_driver = libvirt.LibvirtDriver image_service = nova.image.glance.GlanceImageService volume_api_class = nova.volume.cinder.API auth_strategy = keystone network_api_class = nova.network.neutronv2.api.API service_neutron_metadata_proxy = True security_group_api = neutron injected_network_template = /usr/share/nova/interfaces.template enabled_apis = osapi_compute,metadata compute_driver = libvirt.LibvirtDriver transport_url = rabbit://openstack:RABBIT_PASS@controller auth_strategy = keystone my_ip = 10.0.0.39 use_neutron = True firewall_driver = nova.virt.firewall.NoopFirewallDriver [api_database] [barbican] [cache] [cells] [cinder] [conductor] [cors] [cors.subdomain] [database] connection = mysql://nova:nova@localhost/nova [ephemeral_storage_encryption] [glance] api_servers = http://controller:9292 [guestfs] [hyperv] [image_file_url] [ironic] [keymgr] [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = nova password = NOVA_PASS [libvirt] virt_type = qemu [matchmaker_redis] [metrics] [neutron] url = http://controller:9696 auth_url = http://controller:35357 auth_type = password project_domain_name = Default user_domain_name = Default region_name = RegionOne project_name = service username = neutron password = NEUTRON_PASS [osapi_v21] [oslo_concurrency] lock_path = /var/run/nova [oslo_messaging_amqp] [oslo_messaging_notifications] [oslo_messaging_rabbit] [oslo_middleware] [oslo_policy] [rdp] [serial_console] [spice] spicehtml5proxy_host = :: html5proxy_base_url = http://controller:6082/spice_auto.html enabled = True agent_enabled = True server_listen = :: server_proxyclient_address = 10.0.0.39 keymap = en-us [ssl] [trusted_computing] 
[upgrade_levels] [vmware] [vnc] enabled = false [workarounds] [xenserver]
Завершение установки
Проверка на аппаратное ускорение.
egrep -c '(vmx|svm)' /proc/cpuinfo
Если вывод не 0, меняем в файле /etc/nova/nova.conf строчку
virt_type = qemu
на
virt_type = kvm
проверка установки nova
su - admin . admin-openrc openstack compute service list +----+------------------+-----------+----------+---------+-------+----------------------------+ | Id | Binary | Host | Zone | Status | State | Updated At | +----+------------------+-----------+----------+---------+-------+----------------------------+ | 1 | nova-consoleauth | conroller | internal | enabled | up | 2017-05-18T09:09:12.000000 | | 2 | nova-conductor | conroller | internal | enabled | up | 2017-05-18T09:09:14.000000 | | 3 | nova-scheduler | conroller | internal | enabled | up | 2017-05-18T09:09:19.000000 | | 6 | nova-compute | compute3 | nova | enabled | up | 2017-05-18T09:09:16.000000 | +----+------------------+-----------+----------+---------+-------+----------------------------+
Настройка сетевого сервиса neutron
настраиваем управляющий узел
mysql -u root -p CREATE DATABASE neutron; GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'NEUTRON_DBPASS'; GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'NEUTRON_DBPASS'; su - admin . admin-openrc openstack user create --domain default --password-prompt neutron openstack role add --project service --user neutron admin openstack service create --name neutron --description "OpenStack Networking" network openstack endpoint create --region RegionOne network public http://controller:9696 openstack endpoint create --region RegionOne network internal http://controller:9696 openstack endpoint create --region RegionOne network admin http://controller:9696
cd /etc/neutron mv neutron.conf neutron.conf.dist cat >neutron.conf [DEFAULT] core_plugin = ml2 state_path = /var/lib/neutron log_dir = /var/log/neutron service_plugins = router allow_overlapping_ips = True transport_url = rabbit://openstack:RABBIT_PASS@controller auth_strategy = keystone notify_nova_on_port_status_changes = True notify_nova_on_port_data_changes = True [agent] root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf [cors] [cors.subdomain] [database] connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron [keystone_authtoken] signing_dir = /var/cache/neutron/keystone-signing auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = neutron password = NEUTRON_PASS [matchmaker_redis] [nova] auth_url = http://controller:35357 auth_type = password project_domain_name = Default user_domain_name = Default region_name = RegionOne project_name = service username = nova password = NOVA_PASS [oslo_concurrency] lock_path = /var/run/neutron [oslo_messaging_amqp] [oslo_messaging_notifications] [oslo_messaging_rabbit] [oslo_policy] [qos] [quotas] [ssl]
Настройка Modular Layer 2 (ML2)
cd /etc/neutron/plugins/ml2/ mv ml2_conf.ini ml2_conf.ini.ORIG cat > ml2_conf.ini [DEFAULT] [ml2] type_drivers = flat,vlan,vxlan tenant_network_types = vxlan mechanism_drivers = linuxbridge,l2population extension_drivers = port_security [ml2_type_flat] flat_networks = provider [ml2_type_geneve] [ml2_type_gre] [ml2_type_vlan] [ml2_type_vxlan] vni_ranges = 1:1000 [securitygroup] enable_ipset = True
cd /etc/neutron/plugins/ml2/ mv linuxbridge_agent.ini linuxbridge_agent.ini.ORIG cat >linuxbridge_agent.ini [DEFAULT] [agent] [linux_bridge] physical_interface_mappings = provider:ens18 [securitygroup] enable_security_group = True firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver [vxlan] local_ip = 10.0.0.11 enable_vxlan = True l2_population = True
настройка l3 agent
cd /etc/neutron mv l3_agent.ini l3_agent.ini_ORIG cat >l3_agent.ini [DEFAULT] interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver [AGENT]
настройка DHCP агента
cd /etc/neutron mv dhcp_agent.ini dhcp_agent.ini_ORIG cat >dhcp_agent.ini [DEFAULT] interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver dhcp_delete_namespaces = True dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq enable_isolated_metadata = True [AGENT]
наполнение базы neutron
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head"
systemctl restart openstack-nova-api.service systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service