OpenStack (Train) Manual Deployment

#### Lab environment

| Hostname   | CPU     | Memory | Disk  | OS       | ens33 (IP) |
| ---------- | ------- | ------ | ----- | -------- | ---------- |
| controller | 2 cores | 4 GB   | 40 GB | CentOS 7 | 10.0.0.11  |
| compute    | 2 cores | 4 GB   | 40 GB | CentOS 7 | 10.0.0.22  |

#### 1. Environment preparation

~~~ java
1. Set the hostnames
# Controller node (controller)
[root@localhost ~]# hostnamectl set-hostname controller
[root@localhost ~]# bash
[root@controller ~]# 
# Compute node (compute)
[root@localhost ~]# hostnamectl set-hostname compute
[root@localhost ~]# bash
[root@compute ~]#
2. Configure name resolution
# Controller node (controller)
[root@controller ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.11  controller
10.0.0.22  compute
# Compute node (compute)
[root@compute ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.11  controller
10.0.0.22  compute
~~~
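
Before moving on, it is worth confirming that the two nodes can actually reach each other by hostname. This quick check is not part of the original steps; it simply exercises the /etc/hosts entries above:

```java
# On the controller node
[root@controller ~]# ping -c 2 compute
# On the compute node
[root@compute ~]# ping -c 2 controller
# Replies should come from 10.0.0.22 and 10.0.0.11 respectively
```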

#### 2. Disable the firewall, NetworkManager, and SELinux

~~~ java
# Run on both the controller node and the compute node
[root@controller ~]# systemctl stop firewalld
[root@controller ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@controller ~]# 
[root@controller ~]# systemctl stop NetworkManager
[root@controller ~]# systemctl disable NetworkManager
Removed symlink /etc/systemd/system/multi-user.target.wants/NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service.
Removed symlink /etc/systemd/system/network-online.target.wants/NetworkManager-wait-online.service.
[root@controller ~]# 
[root@controller ~]# setenforce 0
[root@controller ~]# 
[root@controller ~]# vi /etc/selinux/config 
[root@controller ~]# cat /etc/selinux/config 
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled     # change this value to disabled
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected. 
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted 
~~~

#### 3. Configure time synchronization

~~~ java
# Run on both the controller node and the compute node
[root@controller ~]# yum -y install chrony
# Edit the configuration file
[root@controller ~]# vi /etc/chrony.conf     # add lines 7, 8, and 9 shown below
      1 # Use public servers from the pool.ntp.org project.
      2 # Please consider joining the pool (http://www.pool.ntp.org/join.html).
      3 server 0.centos.pool.ntp.org iburst
      4 server 1.centos.pool.ntp.org iburst
      5 server 2.centos.pool.ntp.org iburst
      6 server 3.centos.pool.ntp.org iburst
      7 server ntp1.aliyun.com iburst
      8 server ntp2.aliyun.com iburst
      9 allow 10.0.0.0/24
# Restart the service and enable it at boot
[root@controller ~]# systemctl restart chronyd
[root@controller ~]# systemctl enable chronyd
# Check the synchronization status
[root@controller ~]# chronyc sources -v
210 Number of sources = 6

  .-- Source mode  '^' = server, '=' = peer, '#' = local clock.
 / .- Source state '*' = current synced, '+' = combined , '-' = not combined,
| /   '?' = unreachable, 'x' = time may be in error, '~' = time too variable.
||                                                 .- xxxx [ yyyy ] +/- zzzz
||      Reachability register (octal) -.           |  xxxx = adjusted offset,
||      Log2(Polling interval) --.      |          |  yyyy = measured offset,
||                                \     |          |  zzzz = estimated error.
||                                 |    |           \
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^- tock.ntp.infomaniak.ch        1   6    73    50    +23ms[  +23ms] +/-  126ms
^- 111.230.189.174               2   6    57    52  +1087us[+1087us] +/-   73ms
^? ntp2.flashdance.cx            2   6     1    51  +9624us[+9624us] +/-   96ms
^- time.neu.edu.cn               1   6    37    53    +13ms[  +13ms] +/-   47ms
^- 120.25.115.20                 2   6    65   115    -10ms[  -16ms] +/-   58ms
^* 203.107.6.88                  2   6    37    54   +521us[-5366us] +/-   45ms
[root@controller ~]# 
~~~
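
The same package and configuration are applied on the compute node. As an extra sanity check (not in the original steps), `chronyc tracking` shows which server the node is currently synchronized to and the current offset:

```java
[root@compute ~]# chronyc tracking
# "Reference ID" should name one of the configured NTP servers and the
# "System time" offset should be small once synchronization has settled
```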

#### 4. Install the OpenStack packages

```Java
# Controller and compute nodes
# yum -y install centos-release-openstack-train     # enable the OpenStack Train release repository
# yum -y install https://rdoproject.org/repos/rdo-release.rpm   # download and install the RDO repository RPM to enable the OpenStack repository
# yum -y upgrade    # upgrade the installed packages
# yum -y install python-openstackclient     # install the OpenStack client
# yum -y install openstack-selinux          # install the OpenStack SELinux policies
```

#### 5. Install the MariaDB database

```Java
# Controller node
# yum -y install mariadb mariadb-server python2-PyMySQL    # install the packages

# vi /etc/my.cnf.d/openstack.cnf     # create and edit the file
[mysqld]
bind-address = 10.0.0.11
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

# Start the service and enable it at boot
# systemctl enable mariadb.service
# systemctl start mariadb.service

# mysql_secure_installation     # secure the database installation; answer the prompts as follows
Enter (the current root password is blank)
y (set a root password)
enter the new root password (123456 in this walkthrough)
re-enter the new root password
y (remove anonymous users)
y (disallow remote root login)
y (remove the test database)
y (reload the privilege tables)
# mysql -uroot -p123456   # verify the login with the password set above
```

#### 6. Install the RabbitMQ message queue

```Java
# Controller node
# yum -y install rabbitmq-server    # install the package
# Start the service and enable it at boot
# systemctl enable rabbitmq-server.service
# systemctl start rabbitmq-server.service
# rabbitmqctl add_user openstack RABBIT_PASS   # add the openstack user with password RABBIT_PASS
# rabbitmqctl set_permissions openstack ".*" ".*" ".*"     # grant the openstack user configure, write, and read access
```
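
If you want to confirm the broker is ready before moving on, rabbitmqctl can list the user and its permissions (an optional check, not part of the original steps):

```java
# rabbitmqctl list_users            # the openstack user should appear in the list
# rabbitmqctl list_permissions      # openstack should have ".*" ".*" ".*" on the default vhost
```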

#### 7. Install Memcached

```java
# Controller node
# yum -y install memcached python-memcached     # install the packages
# vi /etc/sysconfig/memcached
OPTIONS="-l 127.0.0.1,::1,controller"

# Start the service and enable it at boot
# systemctl enable memcached.service
# systemctl start memcached.service
```
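
A quick way to confirm that memcached picked up the new OPTIONS line is to check that it is listening on port 11211 on the controller address (optional check):

```java
# systemctl status memcached     # should be active (running)
# ss -tnlp | grep 11211          # memcached should be bound to 127.0.0.1, ::1 and the controller address
```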

#### 8. Install etcd

```Java
# Controller node
# yum -y install etcd      # install the package
# vi /etc/etcd/etcd.conf
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://10.0.0.11:2380"
ETCD_LISTEN_CLIENT_URLS="http://10.0.0.11:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.0.0.11:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.11:2379"
ETCD_INITIAL_CLUSTER="controller=http://10.0.0.11:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"

# Start the service and enable it at boot
# systemctl enable etcd
# systemctl start etcd
```
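
etcd serves a plain HTTP API on the client port, so a curl against the version endpoint is enough to confirm it answers on 10.0.0.11:2379 (optional check):

```java
# systemctl status etcd                   # should be active (running)
# curl http://10.0.0.11:2379/version      # should return the etcd server and cluster versions as JSON
```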

## Installing the OpenStack services

### 1. Keystone

#### 1.1 Install and configure

```Java
# Controller node
# mysql -u root -p
MariaDB [(none)]> CREATE DATABASE keystone;

# 'KEYSTONE_DBPASS' is the password for the keystone database user
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';

# yum -y install openstack-keystone httpd mod_wsgi     # install the packages
# vi /etc/keystone/keystone.conf
[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone       # configure database access

[token]
provider = fernet    # configure the Fernet token provider

# su -s /bin/sh -c "keystone-manage db_sync" keystone       # populate the Identity service database
# mysql -uroot -p123456  -e "show tables from keystone"      # verify
# Initialize the Fernet key repositories
# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

# ADMIN_PASS here can be replaced with a password of your own
# keystone-manage bootstrap --bootstrap-password ADMIN_PASS --bootstrap-admin-url http://controller:5000/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne     # bootstrap the Identity service
  
# vi /etc/httpd/conf/httpd.conf
ServerName controller

# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/  # create a link to the file

# systemctl enable httpd.service
# systemctl start httpd.service

# Configure the administrative account by setting the appropriate environment variables
# export OS_USERNAME=admin
# export OS_PASSWORD=ADMIN_PASS
# export OS_PROJECT_NAME=admin
# export OS_USER_DOMAIN_NAME=Default
# export OS_PROJECT_DOMAIN_NAME=Default
# export OS_AUTH_URL=http://controller:5000/v3
# export OS_IDENTITY_API_VERSION=3

# env | grep OS_   # verify that the variables are set
```

#### 1.2 Create a domain, projects, users, and roles

```Java
# Controller node
# openstack domain create --description "An Example Domain" example    # create a domain named example
# openstack project create --domain default --description "Service Project" service   # create the service project
# openstack project create --domain default --description "Demo Project" myproject  # create the myproject project
# openstack user create --domain default --password-prompt myuser    # create the myuser user; password set to 123456
# openstack role create myrole    # create the myrole role
# openstack role add --project myproject --user myuser myrole   # add the role to the project and user
```

#### 1.3 Verify operation

```Java
# Controller node
# unset OS_AUTH_URL OS_PASSWORD      # unset the temporary environment variables

# Request an authentication token as the admin user
# openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue     # the password prompted for is ADMIN_PASS


# openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name myproject --os-username myuser token issue    # the password prompted for is 123456
```

#### 1.4 Create OpenStack client environment scripts

~~~ java
# Controller node

# The password here is ADMIN_PASS, the admin user's password in the Identity service
# vi admin-openrc
#!/bin/bash
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS      
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

# vi demo-openrc
#!/bin/bash
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=123456      # the password set for the myuser user above
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
    
# . admin-openrc    # load the variables
# openstack token issue    # request an authentication token
~~~

### 2. Glance

#### 2.1 Install and configure

```java
# Controller node
# mysql -u root -p
# Create the database and grant access; the password here is GLANCE_DBPASS
MariaDB [(none)]> CREATE DATABASE glance;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';

# . admin-openrc

# openstack user create --domain default --password-prompt glance   # create the glance user; password set to 123456
# openstack role add --project service --user glance admin    # add the admin role to the glance user and service project
# openstack service create --name glance --description "OpenStack Image" image  # create the glance service entity

# Create the Image service API endpoints
# openstack endpoint create --region RegionOne image public http://controller:9292
# openstack endpoint create --region RegionOne image internal http://controller:9292
# openstack endpoint create --region RegionOne image admin http://controller:9292

# yum -y install openstack-glance    # install the package
# vi /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance

[keystone_authtoken]
www_authenticate_uri  = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = 123456

[paste_deploy]
flavor = keystone

[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

# su -s /bin/sh -c "glance-manage db_sync" glance      # populate the Image service database

# mysql -uroot -p123456 -e "show tables from glance"    # verify

# systemctl enable openstack-glance-api.service && systemctl start openstack-glance-api.service

# Modify the configuration file
# vi /etc/httpd/conf.d/wsgi-keystone.conf
# Append at the end of the file
Alias /identity_admin /usr/bin/keystone-wsgi-admin
<Location /identity_admin>
    SetHandler wsgi-script
    Options +ExecCGI
    WSGIProcessGroup keystone-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
</Location>
# systemctl restart httpd    # restart the httpd service
```

#### 2.2 Verify operation

```java
# Controller node
# . admin-openrc
# yum -y install wget
# wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img    # download the image
# glance image-create --name "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --visibility public       # upload the image
# glance image-list     # list images
```
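
Besides `glance image-list`, you can confirm the image is active and that the filesystem store configured earlier actually received the data (optional checks):

```java
# openstack image show cirros        # status should be "active"
# ls -lh /var/lib/glance/images/     # the uploaded image file should be stored here
```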

### 3. Placement

#### 3.1 Install and configure

```java
# Controller node
# Create the placement database; the password is PLACEMENT_DBPASS
# mysql -u root -p
MariaDB [(none)]> CREATE DATABASE placement;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'PLACEMENT_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'PLACEMENT_DBPASS';

# . admin-openrc    # load the admin credentials

# openstack user create --domain default --password-prompt placement     # create the placement user; password set to 123456
# Add the placement user to the service project with the admin role
# openstack role add --project service --user placement admin

# openstack service create --name placement --description "Placement API" placement
# openstack endpoint create --region RegionOne placement public http://controller:8778
# openstack endpoint create --region RegionOne placement internal http://controller:8778
# openstack endpoint create --region RegionOne placement admin http://controller:8778

# yum -y install openstack-placement-api  # install the package

# vi /etc/placement/placement.conf    # edit the configuration file
[placement_database]
connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement
[api]
auth_strategy = keystone

[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
# must match the password set for the placement user above
password = 123456

# vi /etc/httpd/conf.d/00-placement-api.conf
# Append at the bottom of the file
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
# su -s /bin/sh -c "placement-manage db sync" placement   # populate the placement database
# systemctl restart httpd   # restart the httpd service
# Installation is complete.
```

#### 3.2 Verify installation

```java
# Controller node
# . admin-openrc     # load the admin credentials
# placement-status upgrade check
```
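
Since the Placement API is served by httpd, another quick check is to hit its root URL; it should answer with a small JSON document listing the supported API versions (optional check):

```java
# curl http://controller:8778     # expect HTTP 200 and a JSON body containing a "versions" list
```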

### 4. Nova

#### 4.1 Install and configure the controller node

```java
# Controller node
# Create the nova, nova_api, and nova_cell0 databases and grant privileges; the password is NOVA_DBPASS
# mysql -u root -p
MariaDB [(none)]> CREATE DATABASE nova_api;
MariaDB [(none)]> CREATE DATABASE nova;
MariaDB [(none)]> CREATE DATABASE nova_cell0;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';

# . admin-openrc    # load the admin credentials

# openstack user create --domain default --password-prompt nova     # create the nova user; password set to 123456
# openstack role add --project service --user nova admin     # add the admin role to the nova user
# openstack service create --name nova --description "OpenStack Compute" compute   # create the nova service entity

# Create the Compute API service endpoints
# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1

# Install the packages
# yum -y install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler

# Edit the configuration file
# vi /etc/nova/nova.conf
[DEFAULT]
my_ip = 10.0.0.11
transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/
enabled_apis = osapi_compute,metadata
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api

[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova

[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 123456

[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 123456

# su -s /bin/sh -c "nova-manage api_db sync" nova    # populate the nova_api database
# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova     # register the cell0 database
# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova    # create the cell1 cell
# su -s /bin/sh -c "nova-manage db sync" nova      # populate the nova database
# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova   # verify that cell0 and cell1 are registered correctly

# Start the services and enable them at boot
# systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service ; systemctl start openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

# systemctl status openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service     # check the service status
```
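
Before moving to the compute node, a quick look at the listening ports can save debugging time later (optional, not part of the original steps):

```java
# ss -tnlp | grep -E '8774|8775|6080'   # nova-api (8774/8775) and the noVNC proxy (6080) should be listening
```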

#### 4.2 Install and configure the compute node

```java
# Compute node
# yum -y install openstack-nova-compute
# vi /etc/nova/nova.conf
[DEFAULT]
# my_ip must be the compute node's own management IP
my_ip = 10.0.0.22
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller

[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 123456

[vnc]	
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://10.0.0.11:6080/vnc_auto.html

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 123456
    
[libvirt]
hw_machine_type = x86_64=pc-i440fx-rhel7.2.0
cpu_mode = host-passthrough

# egrep -c '(vmx|svm)' /proc/cpuinfo   # if this returns 0, the node lacks hardware virtualization support and you must set virt_type to qemu as below; any other value means KVM is available and this step can be skipped
# vi /etc/nova/nova.conf
[libvirt]
virt_type = qemu

# systemctl enable libvirtd.service openstack-nova-compute.service ; systemctl start libvirtd.service openstack-nova-compute.service
# systemctl status libvirtd.service openstack-nova-compute.service
===========================================controller==================================================
# Controller node
# . admin-openrc
# openstack compute service list --service nova-compute
# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

NOTE!

When you add new compute nodes, you must run nova-manage cell_v2 discover_hosts on the controller node to register them. Alternatively, you can set an appropriate discovery interval in the following location:
# vi /etc/nova/nova.conf
[scheduler]
discover_hosts_in_cells_interval = 300
```

#### 4.3 Verify operation

```java
# Controller node
# . admin-openrc    # load the admin credentials
# openstack compute service list     # verify that each service component started and registered successfully
# openstack catalog list     # list the services and endpoints in the catalog
# openstack image list        # list images
# nova-status upgrade check     # check that the cells and placement API are working successfully
```

### 5. Neutron

#### 5.1 Install and configure the controller node

These notes use the provider network option.

```java
# Controller node
# Create the database and grant privileges; the password is NEUTRON_DBPASS
# mysql -u root -p
MariaDB [(none)]> CREATE DATABASE neutron;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'NEUTRON_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'NEUTRON_DBPASS';

# . admin-openrc      # load the admin credentials
# openstack user create --domain default --password-prompt neutron    # create the neutron user; password set to 123456
# openstack role add --project service --user neutron admin     # add the admin role to the neutron user

# Create the neutron service entity
# openstack service create --name neutron --description "OpenStack Networking" network

# Create the Networking service API endpoints
# openstack endpoint create --region RegionOne network public http://controller:9696
# openstack endpoint create --region RegionOne network internal http://controller:9696
# openstack endpoint create --region RegionOne network admin http://controller:9696

# yum -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables  # install the packages
# Configure the neutron server configuration file
# NOTE: the file on disk may be missing sections; a complete sample can be found at the URL below
# https://docs.openstack.org/ocata/config-reference/networking/samples/neutron.conf.html
# vi /etc/neutron/neutron.conf
[database]
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron

[DEFAULT]
core_plugin = ml2
service_plugins =
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true


[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123456

[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = 123456

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp


# Configure the Modular Layer 2 (ML2) plug-in
# This configuration file may also be incomplete; it is recommended to copy a full sample from the link below
# https://docs.openstack.org/ocata/config-reference/networking/samples/ml2_conf.ini.html
# vi /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security


[ml2_type_flat]
flat_networks = extnet

[securitygroup]
enable_ipset = true

# Configure the Linux bridge agent
# This configuration file may also be incomplete; a full sample is available at the link below
# https://docs.openstack.org/ocata/config-reference/networking/samples/linuxbridge_agent.ini.html
# vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = extnet:ens33

[vxlan]
enable_vxlan = false

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

# modprobe br_netfilter         # load the br_netfilter kernel module

# vi /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

# sysctl -p    # apply the sysctl settings

# Configure the DHCP agent
# vi /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true

# Configure the metadata agent
# METADATA_SECRET below is the metadata proxy shared secret; replace it with a secret of your own
# vi /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = METADATA_SECRET

# Configure the Compute service to use the Networking service
# the neutron password here is 123456; the secret below must match the metadata agent's METADATA_SECRET
# vi /etc/nova/nova.conf
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET

# Create a symbolic link to the ML2 plug-in configuration
# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

# Populate the database
# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

# systemctl restart openstack-nova-api.service

# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
```

#### 5.2 Install and configure the compute node

```java
# Compute node
# yum -y install openstack-neutron-linuxbridge ebtables ipset   # install the packages
# Edit the configuration file
# vi /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123456

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

# Configure the Linux bridge agent
# https://docs.openstack.org/ocata/config-reference/networking/samples/linuxbridge_agent.ini.html
# vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = extnet:ens33

[vxlan]
enable_vxlan = false

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

# modprobe br_netfilter         # load the br_netfilter kernel module

# vi /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
# sysctl -p    # apply the sysctl settings (this step mirrors the controller node)

# Configure the Compute service to use the Networking service
# vi /etc/nova/nova.conf
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456

# systemctl restart openstack-nova-compute.service    # restart the Compute service
# systemctl enable neutron-linuxbridge-agent.service ; systemctl start neutron-linuxbridge-agent.service    # start the Linux bridge agent and enable it at boot
```

#### 5.3 Verify operation

~~~ java
# Controller node
# openstack network agent list      # verify that all four agents are up
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 242f894d-c894-4432-b2a3-1c3bcff5c202 | Metadata agent     | controller | None              | :-)   | UP    | neutron-metadata-agent    |
| 4c55cb05-43af-46ba-8cf7-7f329564b189 | Linux bridge agent | controller | None              | :-)   | UP    | neutron-linuxbridge-agent |
| 7c1567ab-6cb2-48de-82e3-32e39f16b11c | Linux bridge agent | compute    | None              | :-)   | UP    | neutron-linuxbridge-agent |
| fa653a3f-7943-4dd0-93ba-cdaa505f891f | DHCP agent         | controller | nova              | :-)   | UP    | neutron-dhcp-agent        |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
~~~

### 6. Dashboard

#### 6.1 Install and configure

```java
# Controller node
# yum -y install openstack-dashboard       # install the package
# vi /etc/openstack-dashboard/local_settings     # edit the configuration file
OPENSTACK_HOST = "controller"

ALLOWED_HOSTS = ['*']

CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': 'controller:11211',
    }
}

SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 3,
}

OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
WEBROOT = "/dashboard"
OPENSTACK_NEUTRON_NETWORK = {
    ...
    'enable_router': False,
    'enable_quotas': False,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': False,
    'enable_firewall': False,
    'enable_vpn': False,
    'enable_fip_topology_check': False,
}

TIME_ZONE = "Asia/Shanghai"

# vi /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}

# systemctl restart httpd.service memcached.service
# Access http://10.0.0.11/dashboard in a web browser
```
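
If the page does not load, checking the HTTP response from the controller itself helps separate a network problem from a dashboard misconfiguration (optional check):

```java
# curl -I http://10.0.0.11/dashboard      # expect HTTP 200 or a 302 redirect to the login page
# tail /var/log/httpd/error_log           # Django/dashboard errors are logged here
```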

### 7. Launch an instance

#### 7.1 Create an instance

```java
# Controller node
# Create the virtual (provider) network
# . admin-openrc
# openstack network create  --share --external --provider-physical-network extnet --provider-network-type flat flat-extnet

# Create a subnet on the network
# openstack subnet create --network flat-extnet --allocation-pool start=10.0.0.10,end=10.0.0.60 --dns-nameserver 8.8.8.8 --gateway 10.0.0.2 --subnet-range 10.0.0.0/24 flat-subnet

# Create an instance
# Create the m1.nano flavor
# The smallest default flavor consumes 512 MB of memory per instance. For environments whose compute nodes have less than 4 GB of memory, we recommend creating the m1.nano flavor, which requires only 64 MB per instance. Use this flavor only with the CirrOS image, for testing purposes.

# openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano

# Generate a key pair
# . demo-openrc
# Generate the key pair and add the public key
# ssh-keygen -q -N ""     # press Enter to accept the default file location
# openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
# openstack keypair list         # verify that the key pair was added

# Add rules to the default security group
# openstack security group rule create --proto icmp default   # permit ICMP (ping)
# openstack security group rule create --proto tcp --dst-port 22 default    # permit SSH access on port 22

# Create and launch the instance
# . demo-openrc
# openstack flavor list      # list available flavors
# openstack image list      # list images
# openstack network list     # list available networks
# openstack security group list       # list available security groups

# Launch the instance (the net-id below is the ID of flat-extnet from `openstack network list`)
# openstack server create --flavor m1.nano --image cirros --nic net-id=87785aa3-d4ae-4873-be39-dcdb7841da7e --security-group default --key-name mykey vm1

# openstack server list      # check the instance status
# openstack console url show vm1   # get the console access URL for the instance
```
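
Once `openstack server list` shows vm1 as ACTIVE, you can reach it over the provider network. The address below is only an example of what the subnet's allocation pool might hand out; substitute the IP actually reported for vm1:

```java
# . demo-openrc
# openstack server list            # note the IP assigned to vm1 (example: 10.0.0.55)
# ping -c 2 10.0.0.55              # replace with the real address of vm1
# ssh cirros@10.0.0.55             # the injected mykey key pair allows key-based login;
                                   # the CirrOS 0.3.5 fallback password is "cubswin:)"
```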
