OpenStack Deployment


OpenStack Stein on CentOS 7.4 (1708)

1 System tuning

1.1 sysctl changes

fs.file-max = 65536  

net.ipv4.ip_forward = 1  

net.ipv4.conf.all.rp_filter = 0  

net.ipv4.conf.default.rp_filter = 0
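
A minimal sketch of applying these settings, assuming the lines above were appended to /etc/sysctl.conf:

# load the new kernel parameters
sysctl -p

# spot-check one value
sysctl net.ipv4.ip_forward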

1.2 NIC configuration

em1 (management)

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=static

DEFROUTE=yes

NAME=em1

UUID=947ec8e4-f341-4c32-a932-4c70fdfe8d58

DEVICE=em1

ONBOOT=yes

IPADDR=10.0.0.11

NETMASK=255.255.255.0

GATEWAY=10.0.0.1

DNS1=223.5.5.5

em2 (overlay)

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=static

NAME=em2

UUID=8da98a51-15d0-4522-beb2-1f78b1f160f9

DEVICE=em2

ONBOOT=yes

IPADDR=10.0.1.11

NETMASK=255.255.255.0

em3 (field)

TYPE=Ethernet

BOOTPROTO=none

DEVICE=em3

ONBOOT=yes

em4 (plc)

TYPE=Ethernet

BOOTPROTO=none

DEVICE=em4

ONBOOT=yes

1.3 Add hosts entries

10.0.0.10 web

10.0.0.11 controller

10.0.0.12 compute1

10.0.0.13 compute2

1.4 NTP time synchronization

controller:

yum -y install ntp  ntpdate

vi /etc/ntp.conf (change the upstream server addresses)

systemctl start ntpd && systemctl enable ntpd

other nodes:

yum -y install ntpdate

# crontab -e, add a scheduled job

*/5 * * * * /usr/sbin/ntpdate controller &>/tmp/ntp.log

# /etc/ntp.conf on the controller

# ...

restrict compute1

server ntp.aliyun.com iburst

server 127.127.1.0

fudge 127.127.1.0 stratum 8
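
To confirm synchronization (standard ntp tooling, not part of the original steps):

# on the controller: list upstream peers and their reach
ntpq -p

# on the other nodes: query the controller without adjusting the clock
ntpdate -q controller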

1.5 ulimit changes

# /etc/security/limits.conf

# End of file

* soft nofile 655350  # maximum open file descriptors per user; the default is 1024 and this value caps TCP connections

* hard nofile 655350

* soft nproc  655350  # maximum number of processes per user

* hard nproc  650000
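
A quick check after logging in again (assuming the values above were written to /etc/security/limits.conf):

ulimit -n   # nofile limit, should report 655350
ulimit -u   # nproc limit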

2 Preparation

2.1 OpenStack packages for CentOS  (all node)

Online repository

yum install -y centos-release-openstack-stein

# Update. If the upgrade process includes a new kernel, reboot your host to activate it.

# yum upgrade

yum install python-openstackclient -y

yum install openstack-selinux -y

2.2 SQL database (controller)

yum install mariadb mariadb-server python2-PyMySQL

Create and edit the /etc/my.cnf.d/openstack.cnf file (backup existing configuration files in /etc/my.cnf.d/ if needed) and complete the following actions:

[mysqld]

bind-address = controller

default-storage-engine = innodb

innodb_file_per_table = on

max_connections = 4096

collation-server = utf8_general_ci

character-set-server = utf8

Restart

mkdir -p /usr/lib/systemd/system/mariadb.service.d/

echo -e "[Service]\nLimitNOFILE=10000" |tee /usr/lib/systemd/system/mariadb.service.d/limits.conf

systemctl daemon-reload

systemctl enable mariadb.service

systemctl start mariadb.service

Initialize

mysql_secure_installation

Verify

# Check that max_connections has been raised to 4096

mysql -u root -p

MariaDB [(none)]> show variables like '%max_connections%';

+-----------------------+-------+

| Variable_name         | Value |

+-----------------------+-------+

| extra_max_connections | 1     |

| max_connections       | 4096  |

+-----------------------+-------+

2 rows in set (0.002 sec)

2.3 Message queue (controller)

yum install -y rabbitmq-server

systemctl enable rabbitmq-server.service

systemctl start rabbitmq-server.service

rabbitmqctl add_user openstack RABBIT_PASS

rabbitmqctl set_permissions openstack ".*" ".*" ".*"
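
A sanity check that the openstack user and its permissions exist (standard rabbitmqctl commands, not part of the original steps):

rabbitmqctl list_users

rabbitmqctl list_permissions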

2.4 Memcached (controller)

 yum install memcached python-memcached -y

vi /etc/sysconfig/memcached

# Set OPTIONS="-l 127.0.0.1,controller"

systemctl enable memcached.service

systemctl start memcached.service
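
To confirm memcached is listening on the expected addresses (a simple check, not in the original notes):

ss -tnlp | grep 11211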

Configure the firewall to restrict access

iptables -A INPUT  -s ${CONTROLLER_IP}/32  -p tcp -m tcp --dport 11211 -j ACCEPT

iptables -A INPUT  -s ${COMPUTE_IP}/32  -p tcp -m tcp --dport 11211 -j ACCEPT

iptables -A INPUT -p tcp -m tcp --dport 11211 -j DROP

# These commands can be written to /etc/rc.d/rc.local so the rules are loaded automatically at boot

# chmod +x /etc/rc.d/rc.local

2.5 etcd (controller)

yum install etcd -y

# Edit the configuration

vi /etc/etcd/etcd.conf

# Start

systemctl enable etcd

systemctl start etcd

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

ETCD_LISTEN_PEER_URLS="http://CONTROLLER_IP:2380"

ETCD_LISTEN_CLIENT_URLS="http://CONTROLLER_IP:2379"

ETCD_NAME="controller"

#[Clustering]

ETCD_INITIAL_ADVERTISE_PEER_URLS="http://CONTROLLER_IP:2380"

ETCD_ADVERTISE_CLIENT_URLS="http://CONTROLLER_IP:2379"

ETCD_INITIAL_CLUSTER="controller=http://CONTROLLER_IP:2380"

ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"

ETCD_INITIAL_CLUSTER_STATE="new"
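
A minimal health check, assuming the etcd v2 API shipped with the CentOS 7 package (not part of the original steps):

etcdctl --endpoints=http://CONTROLLER_IP:2379 cluster-health

etcdctl --endpoints=http://CONTROLLER_IP:2379 member list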

3 OpenStack deployment

3.1 keystone

https://docs.openstack.org/keystone/stein/install/keystone-install-rdo.html#prerequisites

# Create the database

mysql -u root -p

MariaDB [(none)]> CREATE DATABASE keystone;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \

IDENTIFIED BY 'KEYSTONE_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \

IDENTIFIED BY 'KEYSTONE_DBPASS';

# Install

yum install openstack-keystone httpd mod_wsgi -y

# Configuration file

vi /etc/keystone/keystone.conf

# Populate the database

su -s /bin/sh -c "keystone-manage db_sync" keystone

# Initialize Fernet key repositories

keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone

keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

# Create the admin user

keystone-manage bootstrap --bootstrap-password ADMIN_PASS \

  --bootstrap-admin-url http://controller:5000/v3/ \

  --bootstrap-internal-url http://controller:5000/v3/ \

  --bootstrap-public-url http://controller:5000/v3/ \

  --bootstrap-region-id RegionOne

  

# Configure Apache access

sed -i '95a ServerName controller' /etc/httpd/conf/httpd.conf

ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

# Start

systemctl enable httpd.service &&  systemctl start httpd.service

Configure the administrative account

export OS_PROJECT_DOMAIN_NAME=Default

export OS_USER_DOMAIN_NAME=Default

export OS_PROJECT_NAME=admin

export OS_USERNAME=admin

export OS_PASSWORD=ADMIN_PASS

export OS_AUTH_URL=http://controller:5000/v3

export OS_IDENTITY_API_VERSION=3

export OS_IMAGE_API_VERSION=2
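
With these variables exported, a token request verifies that Keystone is answering through httpd (verification borrowed from the upstream install guide, not in the original notes):

openstack token issue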

This guide uses a service project that contains a unique user for each service that you add to your environment. Create the service project:

source admin-openrc

openstack project create --domain default \

  --description "Service Project" service

Configuration file template (/etc/keystone/keystone.conf)

[database]

# ...

connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone

[token]

# ...

provider = fernet

3.2 glance

OpenStack Docs: Install and configure (Red Hat)

# Create the database

mysql -u root -p

MariaDB [(none)]> CREATE DATABASE glance;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \

  IDENTIFIED BY 'GLANCE_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \

  IDENTIFIED BY 'GLANCE_DBPASS';

source admin-openrc

# Create the keystone user; enter GLANCE_PASS when prompted

openstack user create --domain default --password-prompt glance

# Grant the user the admin role

openstack role add --project service --user glance admin

# Register the service

openstack service create --name glance --description "OpenStack Image" image

openstack endpoint create --region RegionOne image public http://controller:9292

openstack endpoint create --region RegionOne image internal http://controller:9292

openstack endpoint create --region RegionOne image admin http://controller:9292

# Install

yum install openstack-glance -y

# Configuration files

vi /etc/glance/glance-api.conf

vi /etc/glance/glance-registry.conf

# Populate the database

su -s /bin/sh -c "glance-manage db_sync" glance

# Start

systemctl enable openstack-glance-api.service \

  openstack-glance-registry.service

systemctl start openstack-glance-api.service \

  openstack-glance-registry.service
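
A functional check by uploading a CirrOS image, following the upstream verify step (image version/URL may need adjusting; not part of the original notes):

wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img

openstack image create "cirros" --file cirros-0.4.0-x86_64-disk.img \
  --disk-format qcow2 --container-format bare --public

openstack image list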

Configuration file templates (/etc/glance/glance-api.conf, followed by /etc/glance/glance-registry.conf)

[database]

# ...

connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance

[keystone_authtoken]

# ...

www_authenticate_uri  = http://controller:5000

auth_url = http://controller:5000

memcached_servers = controller:11211

auth_type = password

project_domain_name = Default

user_domain_name = Default

project_name = service

username = glance

password = GLANCE_PASS

[paste_deploy]

# ...

flavor = keystone

[glance_store]

# ...

stores = file,http

default_store = file

filesystem_store_datadir = /var/lib/glance/images/

[database]

# ...

connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance

[keystone_authtoken]

# ...

www_authenticate_uri = http://controller:5000

auth_url = http://controller:5000

memcached_servers = controller:11211

auth_type = password

project_domain_name = Default

user_domain_name = Default

project_name = service

username = glance

password = GLANCE_PASS

[paste_deploy]

# ...

flavor = keystone

3.3 Placement

OpenStack Docs: Placement Service

# Create the database

mysql -u root -p

MariaDB [(none)]> CREATE DATABASE placement;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \

  IDENTIFIED BY 'PLACEMENT_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \

  IDENTIFIED BY 'PLACEMENT_DBPASS';

source admin-openrc

# Create the keystone user; enter PLACEMENT_PASS when prompted

openstack user create --domain default --password-prompt placement

# Grant the user the admin role

openstack role add --project service --user placement admin

# Register the service

openstack service create --name placement --description "Placement API" placement

openstack endpoint create --region RegionOne placement public http://controller:8778

openstack endpoint create --region RegionOne placement internal http://controller:8778

openstack endpoint create --region RegionOne placement admin http://controller:8778

# Install

 yum install openstack-placement-api -y

# Configuration file

vi /etc/placement/placement.conf

# Populate the database

su -s /bin/sh -c "placement-manage db sync" placement

# Restart httpd (the Placement API runs under httpd)

systemctl restart httpd

# Verify the installation

placement-status upgrade check

Configuration file template (/etc/placement/placement.conf)

[placement_database]

# ...

connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement

[api]

# ...

auth_strategy = keystone

[keystone_authtoken]

# ...

auth_url = http://controller:5000/v3

memcached_servers = controller:11211

auth_type = password

project_domain_name = Default

user_domain_name = Default

project_name = service

username = placement

password = PLACEMENT_PASS

3.4 nova

3.4.1 controller

OpenStack Docs: Install and configure controller node for Red Hat Enterprise Linux and CentOS

# Create the databases

mysql -u root -p

MariaDB [(none)]> CREATE DATABASE nova_api;

MariaDB [(none)]> CREATE DATABASE nova;

MariaDB [(none)]> CREATE DATABASE nova_cell0;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \

  IDENTIFIED BY 'NOVA_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \

  IDENTIFIED BY 'NOVA_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \

  IDENTIFIED BY 'NOVA_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \

  IDENTIFIED BY 'NOVA_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \

  IDENTIFIED BY 'NOVA_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \

  IDENTIFIED BY 'NOVA_DBPASS';

  

source admin-openrc

# Create the keystone user; enter NOVA_PASS when prompted

openstack user create --domain default --password-prompt nova

# Grant the user the admin role

openstack role add --project service --user nova admin

# Register the service

openstack service create --name nova --description "OpenStack Compute" compute

openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1

openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1

openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1

# Install

yum install --disablerepo="epel" openstack-nova-api openstack-nova-conductor \

  openstack-nova-novncproxy openstack-nova-scheduler

# Configuration file

vi /etc/nova/nova.conf

# Populate the databases

su -s /bin/sh -c "nova-manage api_db sync" nova

su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova

su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova

su -s /bin/sh -c "nova-manage db sync" nova

su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova

# Start

systemctl enable openstack-nova-api.service \

  openstack-nova-scheduler.service \

  openstack-nova-conductor.service \

  openstack-nova-novncproxy.service

systemctl start openstack-nova-api.service \

  openstack-nova-scheduler.service \

  openstack-nova-conductor.service \

  openstack-nova-novncproxy.service
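
A quick check that the controller services registered correctly (not part of the original notes):

openstack compute service list

# expect nova-scheduler and nova-conductor in state "up"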

Configuration file template (/etc/nova/nova.conf)

[DEFAULT]

# ...

enabled_apis = osapi_compute,metadata

transport_url = rabbit://openstack:RABBIT_PASS@controller

my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS

use_neutron = true

firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api_database]

# ...

connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api

[database]

# ...

connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova

[api]

# ...

auth_strategy = keystone

[keystone_authtoken]

# ...

auth_url = http://controller:5000/v3

memcached_servers = controller:11211

auth_type = password

project_domain_name = Default

user_domain_name = Default

project_name = service

username = nova

password = NOVA_PASS

[vnc]

enabled = true

# ...

server_listen = $my_ip

server_proxyclient_address = $my_ip

[glance]

# ...

api_servers = http://controller:9292

[oslo_concurrency]

# ...

lock_path = /var/lib/nova/tmp

[placement]

# ...

region_name = RegionOne

project_domain_name = Default

project_name = service

auth_type = password

user_domain_name = Default

auth_url = http://controller:5000/v3

username = placement

password = PLACEMENT_PASS

3.4.2 compute

OpenStack Docs: Install and configure a compute node for Red Hat Enterprise Linux and CentOS

# Install

yum install --disablerepo="epel" openstack-nova-compute -y

# Configuration file

vi /etc/nova/nova.conf

# Start

systemctl enable libvirtd.service openstack-nova-compute.service

systemctl start  libvirtd.service openstack-nova-compute.service
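
It is worth checking hardware virtualization support before relying on KVM, as in the upstream guide (not part of the original notes):

egrep -c '(vmx|svm)' /proc/cpuinfo

# if the command returns 0, the host lacks hardware acceleration; in /etc/nova/nova.conf set:
# [libvirt]
# virt_type = qemu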

Configuration file template (/etc/nova/nova.conf)

[DEFAULT]

# ...

enabled_apis = osapi_compute,metadata

transport_url = rabbit://openstack:RABBIT_PASS@controller

my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS

use_neutron = true

firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api]

# ...

auth_strategy = keystone

[keystone_authtoken]

# ...

auth_url = http://controller:5000/v3

memcached_servers = controller:11211

auth_type = password

project_domain_name = Default

user_domain_name = Default

project_name = service

username = nova

password = NOVA_PASS

[vnc]

# ...

enabled = true

server_listen = 0.0.0.0

server_proxyclient_address = $my_ip

novncproxy_base_url = http://CONTROLLER_IP:6080/vnc_auto.html

[glance]

# ...

api_servers = http://controller:9292

[oslo_concurrency]

# ...

lock_path = /var/lib/nova/tmp

[placement]

# ...

region_name = RegionOne

project_domain_name = Default

project_name = service

auth_type = password

user_domain_name = Default

auth_url = http://controller:5000/v3

username = placement

password = PLACEMENT_PASS

3.4.3 Register compute nodes

# Run on the controller

openstack compute service list --service nova-compute

su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

# Configure automatic host discovery

vi /etc/nova/nova.conf

# Restart services

systemctl restart openstack-nova-api.service \

  openstack-nova-scheduler.service

# ...

[scheduler]

discover_hosts_in_cells_interval = 300

3.4.4 Verify the installation

# Run on the controller

# Configure

vi /etc/httpd/conf.d/00-placement-api.conf

systemctl restart httpd

# Verify

nova-status upgrade check

<VirtualHost *:8778>

# ...

  <Directory /usr/bin>

     <IfVersion >= 2.4>

        Require all granted

     </IfVersion>

     <IfVersion < 2.4>

        Order allow,deny

        Allow from all

     </IfVersion>

  </Directory>

</VirtualHost>

3.4.5 Nova scheduler tuning on the controller (optional)

# ...

# Modify the scheduling policy

[filter_scheduler]

weight_classes = nova.scheduler.weights.io_ops.IoOpsWeigher

io_ops_weight_multiplier = -1.0

systemctl restart openstack-nova-scheduler.service

3.4.6 Other Nova tuning on the controller (optional)

[DEFAULT]

# ...

service_down_time = 120

cpu_allocation_ratio = 3.0

ram_allocation_ratio = 1.5

reserved_host_disk_mb = 2048

reserved_host_memory_mb = 2048

allow_resize_to_same_host = True

remove_unused_base_images = False

# remove_unused_original_minimum_age_seconds = 864000

image_cache_manager_interval = 0

resume_guests_state_on_host_boot = True

systemctl restart openstack-nova-api.service

The *_allocation_ratio parameters can be configured in three places:

a. The cpu_allocation_ratio metadata key used by AggregateCoreFilter

     Usage: nova aggregate-set-metadata 1 *_allocation_ratio=2.0

b. The *_allocation_ratio settings in the compute node's nova.conf

c. The original *_allocation_ratio settings in the controller node's nova.conf

Priority: a > b > c

3.5 dashboard

OpenStack Docs: Install and configure for Red Hat Enterprise Linux and CentOS

# Install

yum install openstack-dashboard -y

# Configuration file

vi /etc/openstack-dashboard/local_settings

sed -i '3a WSGIApplicationGroup %{GLOBAL}' /etc/httpd/conf.d/openstack-dashboard.conf

# Restart

systemctl restart httpd.service memcached.service

# ...

OPENSTACK_HOST = "controller"

ALLOWED_HOSTS = ['one.example.com', 'two.example.com']

SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {

    'default': {

         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',

         'LOCATION': 'controller:11211',

    }

}

OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

OPENSTACK_API_VERSIONS = {

    "identity": 3,

    "image": 2,

    "volume": 3,

}

OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"

OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

OPENSTACK_NEUTRON_NETWORK = {

    ...

    'enable_ipv6': False,

}

TIME_ZONE = "Asia/Shanghai"

3.6 network

OpenStack Docs: Open vSwitch mechanism driver

3.6.1 Create the database and register the service (controller)

# Create the database

mysql -u root -p

MariaDB [(none)]> CREATE DATABASE neutron;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \

  IDENTIFIED BY 'NEUTRON_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \

  IDENTIFIED BY 'NEUTRON_DBPASS';

source admin-openrc

# Create the keystone user; enter NEUTRON_PASS when prompted

openstack user create --domain default --password-prompt neutron

# Grant the user the admin role

openstack role add --project service --user neutron admin

# Register the service

openstack service create --name neutron --description "OpenStack Networking" network

openstack endpoint create --region RegionOne network public http://controller:9696

openstack endpoint create --region RegionOne network internal http://controller:9696

openstack endpoint create --region RegionOne network admin http://controller:9696

3.6.2 network node

  • The network services can be deployed on the same node as the controller services

# Install

yum install openstack-neutron openstack-neutron-ml2 \

  openstack-neutron-openvswitch ebtables

  

# Edit the configuration files

vi /etc/neutron/neutron.conf

vi /etc/neutron/plugins/ml2/ml2_conf.ini

# Configure the OVS bridge

yum install -y libibverbs

systemctl enable neutron-openvswitch-agent.service

systemctl start neutron-openvswitch-agent.service

ovs-vsctl add-br br-provider

ovs-vsctl add-port br-provider PROVIDER_INTERFACE

# Edit the configuration files

vi /etc/neutron/plugins/ml2/openvswitch_agent.ini

vi /etc/neutron/l3_agent.ini

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

# Populate the database

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \

  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

# Start

systemctl enable neutron-server.service neutron-l3-agent.service

systemctl start neutron-server.service neutron-l3-agent.service

systemctl restart neutron-openvswitch-agent.service

Enable Neutron in Nova

vi /etc/nova/nova.conf

[neutron]

# ...

auth_url = http://controller:5000

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = neutron

password = NEUTRON_PASS

service_metadata_proxy = true

metadata_proxy_shared_secret = METADATA_SECRET

systemctl restart openstack-nova-api.service

Configuration file templates (neutron.conf, ml2_conf.ini, openvswitch_agent.ini, l3_agent.ini)

[database]

connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron

[DEFAULT]

core_plugin = ml2

service_plugins = router

allow_overlapping_ips = true

transport_url = rabbit://openstack:RABBIT_PASS@controller

auth_strategy = keystone

notify_nova_on_port_status_changes = true

notify_nova_on_port_data_changes = true

dhcp_agents_per_network = 2

[keystone_authtoken]

www_authenticate_uri = http://controller:5000

auth_url = http://controller:5000

memcached_servers = controller:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = neutron

password = NEUTRON_PASS

[nova]

auth_url = http://controller:5000

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = nova

password = NOVA_PASS

[oslo_concurrency]

lock_path = /var/lib/neutron/tmp

[ml2]

# ...

type_drivers = flat,vlan,vxlan

tenant_network_types = vxlan

mechanism_drivers = openvswitch,l2population

extension_drivers = port_security

[ml2_type_flat]

# ...

flat_networks = provider

[ml2_type_vxlan]

# ...

vni_ranges = 1:1000

[securitygroup]

# ...

enable_ipset = true

[ovs]

bridge_mappings = provider:br-provider

local_ip = OVERLAY_INTERFACE_IP_ADDRESS

[agent]

tunnel_types = vxlan

l2_population = True

[securitygroup]

firewall_driver = iptables_hybrid

[DEFAULT]

interface_driver = openvswitch

3.6.3 compute

# Install

yum -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch ebtables

# Edit the configuration files

vi /etc/neutron/neutron.conf

vi /etc/neutron/plugins/ml2/openvswitch_agent.ini

vi /etc/neutron/dhcp_agent.ini

vi /etc/neutron/metadata_agent.ini

# Configure the OVS bridge

yum install -y libibverbs

systemctl enable neutron-openvswitch-agent.service

systemctl start neutron-openvswitch-agent.service

ovs-vsctl add-br br-provider

ovs-vsctl add-port br-provider PROVIDER_INTERFACE

# Start

systemctl enable neutron-dhcp-agent.service neutron-metadata-agent.service

systemctl start neutron-dhcp-agent.service neutron-metadata-agent.service

systemctl restart neutron-openvswitch-agent.service

Enable Neutron in Nova


vi /etc/nova/nova.conf

[neutron]

# ...

auth_url = http://controller:5000

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = neutron

password = NEUTRON_PASS

systemctl restart openstack-nova-compute.service

Configuration file templates (neutron.conf, openvswitch_agent.ini, dhcp_agent.ini, metadata_agent.ini)

[DEFAULT]

transport_url = rabbit://openstack:RABBIT_PASS@controller

core_plugin = ml2

auth_strategy = keystone

[keystone_authtoken]

# ...

www_authenticate_uri = http://controller:5000

auth_url = http://controller:5000

memcached_servers = controller:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = neutron

password = NEUTRON_PASS

[oslo_concurrency]

# ...

lock_path = /var/lib/neutron/tmp

[ovs]

bridge_mappings = provider:br-provider

local_ip = OVERLAY_INTERFACE_IP_ADDRESS

[securitygroup]

firewall_driver = iptables_hybrid

[agent]

tunnel_types = vxlan

l2_population = True

[DEFAULT]

interface_driver = openvswitch

enable_isolated_metadata = True

force_metadata = True

[DEFAULT]

nova_metadata_host = controller

metadata_proxy_shared_secret = METADATA_SECRET

3.6.4 Verify the installation

openstack network agent list

Agent Type          | Host       | State
--------------------+------------+------
Metadata agent      | compute    | UP
DHCP agent          | compute    | UP
Open vSwitch agent  | compute    | UP
L3 agent            | controller | UP
Open vSwitch agent  | controller | UP

3.7 cinder

controller

OpenStack Docs: Install and configure controller node

systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service

systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service

compute

LVM

OpenStack Docs: Install and configure a storage node

CEPH

https://access.redhat.com/documentation/zh-cn/red_hat_ceph_storage/4/pdf/block_device_to_openstack_guide/red_hat_ceph_storage-4-block_device_to_openstack_guide-zh-cn.pdf

NOTE: the Ceph document was written for Red Hat OpenStack; for the actual deployment, follow the upstream Stein OpenStack documentation

Cinder configuration reference: https://docs.openstack.org/cinder/stein/drivers.html#cephbackupdriver

# storage

systemctl start  openstack-cinder-volume.service target.service

# backup

systemctl start openstack-cinder-backup.service
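
A check that the volume services registered (standard Cinder verification, not part of the original notes):

source admin-openrc

openstack volume service list

# expect cinder-scheduler, cinder-volume and cinder-backup in state "up"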

4 Installing TaaS

Reference (CSDN blog): installation steps for the OpenStack tap-as-a-service (TaaS) traffic-mirroring plugin (OpenStack Queens, non-devstack)

4.1 network node

cd /srv

git clone -b stable/stein https://github.com/openstack/tap-as-a-service.git

cd tap-as-a-service

PBR_VERSION=5.8.0 python setup.py install

Edit the configuration file

vi /etc/neutron/neutron.conf

[DEFAULT]

...

service_plugins = router,taas

[service_providers]

service_provider = TAAS:TAAS:neutron_taas.services.taas.service_drivers.taas_rpc.TaasRpcDriver:default

Sync the database and restart the service

neutron-db-manage --subproject tap-as-a-service upgrade head

systemctl restart neutron-server

# Verify the installation

neutron tap-service-list

4.2 compute node

Install TaaS

cd /srv

git clone -b stable/stein https://github.com/openstack/tap-as-a-service

cd tap-as-a-service

PBR_VERSION=5.8.0 python setup.py install

Edit the configuration file

/etc/neutron/plugins/ml2/openvswitch_agent.ini

[agent]

extensions = taas

Restart the service

systemctl restart neutron-openvswitch-agent.service
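
A minimal usage sketch, assuming the TaaS neutronclient extension is installed; MONITOR_PORT_ID and SOURCE_PORT_ID are placeholders, not values from these notes:

# mirror traffic from SOURCE_PORT_ID to the port MONITOR_PORT_ID
neutron tap-service-create --name ts1 --port MONITOR_PORT_ID

neutron tap-flow-create --name tf1 --port SOURCE_PORT_ID --tap-service ts1 --direction BOTH

neutron tap-flow-list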

4.3 controller node (this section has been abandoned and is kept only for archival purposes)

Add the TaaS dashboard

git clone https://opendev.org/x/tap-as-a-service-dashboard.git

cd tap-as-a-service-dashboard

python setup.py build

python setup.py install

Find where the dashboard plugin was installed and copy its enabled files into the dashboard directory

cp /usr/lib/python2.7/site-packages/neutron_taas_dashboard/enabled/_90_project_tapservices_panel.py /usr/share/openstack-dashboard/openstack_dashboard/local/enabled/

# Restart httpd

systemctl restart httpd memcached

5 Installing Zun

Controller node

# Install kuryr

https://docs.openstack.org/kuryr-libnetwork/stein/install/controller-install.html

# Install zun

https://docs.openstack.org/zun/stein/install/controller-install.html

# Because Python 2 is no longer maintained, the zun requirements will have dependency conflicts; after pip install, adjust the package versions manually

pip install --upgrade docker==4.4.4

pip install --upgrade websocket-client==0.59.0

pip install --upgrade PyYAML==5.4.1

systemctl enable zun-api.service zun-wsproxy.service

systemctl start  zun-api.service zun-wsproxy.service

Compute node

# Install docker

curl http://download.bolean.com/common/docker/docker-centos7.sh|bash

# Install kuryr

https://docs.openstack.org/kuryr-libnetwork/stein/install/compute-install-ubuntu.html

# note: stable/stein is no longer maintained

git clone https://git.openstack.org/openstack/kuryr-libnetwork.git

cd kuryr-libnetwork

git checkout stein-em

# Because Python 2 is no longer maintained, the requirements will have dependency conflicts; after pip install, adjust the package versions manually

pip install --upgrade pip==20.3.4

pip install --upgrade kuryr-lib==2.0.1

pip install --upgrade ipaddress==1.0.18

# note: on CentOS the command path is /usr/libexec, on Ubuntu it is /usr/local/libexec

# note:  /etc/kuryr/kuryr.conf

auth_url = http://controller:5000

# Install zun

https://docs.openstack.org/zun/stein/install/compute-install.html

# Because Python 2 is no longer maintained, the requirements will have dependency conflicts; after pip install, adjust the package versions manually

pip install --upgrade docker==4.4.4

pip install --upgrade websocket-client==0.59.0

pip install --upgrade PyYAML==5.4.1


# Enable and start the services

systemctl enable zun-compute.service kuryr-libnetwork.service

systemctl start  zun-compute.service kuryr-libnetwork.service

note

Launching a container and then viewing its details fails with the following error: openstack appcontainer show contain

Docker internal error: 500 Server Error for http+docker://localhost/v1.26/containers/cb2ec03013c1add6156f5273a44559f16e4f414fa519d7b9ee1a5be408a37610/start: Internal Server Error ("failed to create endpoint zun-c6508841-5f49-4f4e-83dc-8a14a0d02e1a on network 3da7e9b0-3335-4b19-936b-a498d864a3b2: NetworkDriver.CreateEndpoint: vif_type(ovs) is not supported. A binding script for this type can't be found")

Fix

chmod 777 /var/run/openvswitch/db.sock

systemctl restart kuryr-libnetwork

chmod 777 /var/run/docker.sock

systemctl daemon-reload

systemctl restart docker

systemctl restart zun-compute

systemctl restart neutron-openvswitch-agent

zun-ui (controller node)

git clone https://github.com/openstack/zun-ui

 

cd zun-ui/

git checkout stein-em

pip install .

cp zun_ui/enabled/* /usr/share/openstack-dashboard/openstack_dashboard/local/enabled/

python /usr/share/openstack-dashboard/manage.py collectstatic

python /usr/share/openstack-dashboard/manage.py compress

systemctl restart httpd
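
A hedged end-to-end check that Zun can launch a container (based on the upstream Zun verify step; NET_ID is a placeholder for an existing Neutron network, not a value from these notes):

source admin-openrc

openstack appcontainer run --name test --net network=NET_ID cirros ping 8.8.8.8

openstack appcontainer list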
