Linux HA-OpenStack Architecture Deployment Guide

Environment Preparation

  • All passwords are uniformly six zeros (000000)
Hostname      IP
controller1   10.0.0.10
controller2   10.0.0.11
compute1      10.0.0.12
compute2      10.0.0.13
data1         10.0.0.14
data2         10.0.0.15
haproxy1      10.0.0.16
haproxy2      10.0.0.17
Client OS     Virtualization Tool   Guest OS
Windows 11    VMware 15.5 Pro       CentOS 7.9

Install Basic Tools

  • All machines
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config; systemctl disable --now firewalld

yum install vim iotop bc gcc gcc-c++ glibc glibc-devel pcre pcre-devel openssl  openssl-devel zip unzip zlib-devel  net-tools lrzsz tree ntpdate telnet lsof tcpdump wget libevent libevent-devel bc  systemd-devel bash-completion traceroute bridge-utils  -y

cat >> /etc/hosts << EOF
10.0.0.10 controller1
10.0.0.11 controller2
10.0.0.12 compute1
10.0.0.13 compute2
10.0.0.14 data1
10.0.0.15 data2
10.0.0.16 haproxy1
10.0.0.17 haproxy2

10.0.0.100 openstack.vip.cn
EOF

HAProxy Compilation and Deployment

  • haproxy1 and haproxy2 machines
  • Set up the Lua environment
    • Download from the official site: http://www.lua.org/ftp/lua-5.4.4.tar.gz
# Install base commands and build dependencies
yum install -y gcc readline-devel

mkdir /apps

tar xvf lua-5.4.4.tar.gz -C /apps/

cd /apps/lua-5.4.4/

make linux test

# Check the version that was compiled and installed
src/lua -v
  • haproxy1 and haproxy2 machines
    • Download from the official site: http://www.haproxy.org/download/2.5/src/haproxy-2.5.7.tar.gz
yum install -y gcc openssl-devel pcre-devel systemd-devel

tar xvf haproxy-2.5.7.tar.gz

cd haproxy-2.5.7/

# Review the installation instructions
ll Makefile

cat README

cat INSTALL

# Compile
make -j 4 TARGET=linux-glibc USE_OPENSSL=1 USE_ZLIB=1 USE_LUA=1 USE_PCRE=1 USE_SYSTEMD=1 LUA_INC=/apps/lua-5.4.4/src LUA_LIB=/apps/lua-5.4.4/src

# Install
make install PREFIX=/apps/haproxy

ln -s /apps/haproxy/sbin/haproxy /usr/sbin/

haproxy -v
  • HAProxy unit file (haproxy1 and haproxy2 machines)
vim /usr/lib/systemd/system/haproxy.service
[Unit]
Description=HAProxy Load Balancer
After=syslog.target network.target

[Service]
ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q
ExecStart=/usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid
ExecReload=/bin/kill -USR2 $MAINPID

[Install]
WantedBy=multi-user.target

mkdir /var/lib/haproxy/

mkdir /etc/haproxy/

# Add the HAProxy man pages to the man path
vim /etc/man_db.conf
MANDATORY_MANPATH     /apps/haproxy/share/man/

# Update the man database
mandb

# Configuration file
vim /etc/haproxy/haproxy.cfg
global
   maxconn 100000
   chroot /apps/haproxy
   stats socket /var/lib/haproxy/haproxy.sock mode 600 level admin
   #uid 99
   #gid 99
   user haproxy
   group haproxy
   daemon
   # nbproc 4
   # cpu-map 1 0
   # cpu-map 2 1
   # cpu-map 3 2
   # cpu-map 4 3
   pidfile /var/lib/haproxy/haproxy.pid
   log 127.0.0.1 local2 info

defaults
   option http-keep-alive
   option forwardfor
   maxconn 100000
   mode http
   timeout connect 300000ms
   timeout client 300000ms
   timeout server 300000ms

listen stats
   mode http
   bind 0.0.0.0:9999
   stats enable
   log global
   stats uri    /haproxy-status
   stats auth   admin:123456

useradd -r -s /sbin/nologin -d /var/lib/haproxy haproxy


# HAProxy normally needs the VIP present on the host to bind it; allow non-local binds so it can start without holding the VIP
vim /etc/sysctl.conf
net.ipv4.ip_nonlocal_bind = 1

sysctl -p


systemctl daemon-reload

systemctl enable --now haproxy
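
A quick check that HAProxy is up is to query the stats page defined above, run on the haproxy node itself (port and credentials come from the listen stats block):

# Expect an HTML stats report
curl -su admin:123456 http://127.0.0.1:9999/haproxy-status | head -n 5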

Haproxy+Keepalived

Install the Service

  • haproxy1 and haproxy2 machines
yum install -y keepalived

Configure HAProxy High Availability

  • haproxy1 and haproxy2 machines
  • Install the check tool (psmisc provides killall)
yum install  -y psmisc
  • Use non-preemptive mode
    • Effect: once the master fails and the VIP floats away, restarting the former master will not take the VIP back
  • haproxy1 machine
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
   notification_email {
       360120854@qq.com
       ws3024321237@163.com
   }
   notification_email_from ws3024321237@163.com
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id haproxy1
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_iptables
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_script check_haproxy {
   script "killall -0 haproxy || systemctl restart haproxy"
   interval 2
}

vrrp_instance HA_openstack {
   state BACKUP
   interface eth0
   virtual_router_id 66
   priority 100
   advert_int 2     # interval between VRRP advertisements, default 1 second
   nopreempt

   track_script {
      check_haproxy
   }

   virtual_ipaddress {
      10.0.0.100/24 dev eth0 label eth0:1       
   }
   notify_master "/etc/keepalived/notify.sh master"
   notify_backup "/etc/keepalived/notify.sh backup"
   notify_fault "/etc/keepalived/notify.sh fault"
}
EOF



vim /etc/keepalived/notify.sh
#!/bin/bash
contact='360120854@qq.com'
notify() {
  mailsubject="$(hostname) 切换到 $1, vip 地址发生漂移"    # 发送标题
  mailbody="$(date +'%F %T'): vip发生漂移, $(hostname) 切换到 $1"   # 发送内容
  echo "$mailbody" | mail -s "$mailsubject" $contact
}
case $1 in
master)
  notify master
  ;;
backup)
  notify backup
  ;;
fault)
  notify fault
  ;;
*)
  echo "Usage: $(basename $0) {master|backup|fault}"
  exit 1
  ;;
esac

chmod +x /etc/keepalived/notify.sh

yum install -y mailx

vim /etc/mail.rc
''''
set from=360120854@qq.com
set smtp=smtp.qq.com
set smtp-auth-user=360120854@qq.com
set smtp-auth-password=ljroytmuhlkjbgje

systemctl restart keepalived
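
To confirm which node holds the VIP after the restart, check the interface label from the config above:

# On the current master, 10.0.0.100 should appear as eth0:1
ip addr show eth0 | grep 10.0.0.100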
  • haproxy2 machine
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
   notification_email {
       360120854@qq.com
       ws3024321237@163.com
   }
   notification_email_from ws3024321237@163.com
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id haproxy2
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_iptables
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_script check_haproxy {
   script "killall -0 haproxy || systemctl restart haproxy"
   interval 2
}

vrrp_instance HA_openstack {
   state BACKUP
   interface eth0
   virtual_router_id 66
   priority 80
   advert_int 2
   nopreempt

   track_script {
      check_haproxy
   }

   virtual_ipaddress {
      10.0.0.100/24 dev eth0 label eth0:1       
   }
   notify_master "/etc/keepalived/notify.sh master"
   notify_backup "/etc/keepalived/notify.sh backup"
   notify_fault "/etc/keepalived/notify.sh fault"
}
EOF



vim /etc/keepalived/notify.sh
#!/bin/bash
contact='360120854@qq.com'
notify() {
  mailsubject="$(hostname) 切换到 $1, vip 地址发生漂移"    # 发送标题
  mailbody="$(date +'%F %T'): vip发生漂移, $(hostname) 切换到 $1"   # 发送内容
  echo "$mailbody" | mail -s "$mailsubject" $contact
}
case $1 in
master)
  notify master
  ;;
backup)
  notify backup
  ;;
fault)
  notify fault
  ;;
*)
  echo "Usage: $(basename $0) {master|backup|fault}"
  exit 1
  ;;
esac

chmod +x /etc/keepalived/notify.sh

yum install -y mailx


vim /etc/mail.rc
''''
set from=360120854@qq.com
set smtp=smtp.qq.com
set smtp-auth-user=360120854@qq.com
set smtp-auth-password=ljroytmuhlkjbgje

systemctl restart keepalived

OpenStack-data

MySQL Master-Master Architecture

  • Deployed from the binary tarball; download: https://downloads.mysql.com/archives/get/p/23/file/mysql-5.7.26-linux-glibc2.12-x86_64.tar.gz
  • Upload to the data1 machine and deploy MySQL
yum install -y libaio-devel

yum remove -y mariadb*

tar xf mysql-5.7.26-linux-glibc2.12-x86_64.tar.gz 

useradd -s /sbin/nologin mysql

mkdir /application/mysql -pv

mkdir /data/mysql/data -pv

mkdir /data/mysql/binlog -pv

echo  "PATH=/application/mysql/bin:$PATH" > /etc/profile.d/mysql.sh

source  /etc/profile.d/mysql.sh

mv mysql-5.7.26-linux-glibc2.12-x86_64/* /application/mysql/

mysqld --initialize-insecure  --user=mysql --basedir=/application/mysql --datadir=/data/mysql/data 

vim  /etc/my.cnf 
[mysqld]
user=mysql
basedir=/application/mysql
datadir=/data/mysql/data
socket=/tmp/mysql.sock
server_id=6
port=3306
log_bin=/data/mysql/binlog/mysql-bin
character_set_server=utf8
[mysql]
socket=/tmp/mysql.sock

chown -R mysql.mysql /data/

cp /application/mysql/support-files/mysql.server /etc/init.d/mysqld

service mysqld start

mysql

# Create the replication user for the cluster
grant replication slave on *.* to repl@'%' identified by '123';
  • Upload to the data2 machine and deploy MySQL
yum install -y libaio-devel

yum remove -y mariadb*

tar xf mysql-5.7.26-linux-glibc2.12-x86_64.tar.gz 

useradd -s /sbin/nologin mysql

mkdir /application/mysql -pv

mkdir /data/mysql/data -pv

mkdir /data/mysql/binlog -pv

echo  "PATH=/application/mysql/bin:$PATH" > /etc/profile.d/mysql.sh

source  /etc/profile.d/mysql.sh

mv mysql-5.7.26-linux-glibc2.12-x86_64/* /application/mysql/

mysqld --initialize-insecure  --user=mysql --basedir=/application/mysql --datadir=/data/mysql/data 

vim  /etc/my.cnf 
[mysqld]
user=mysql
basedir=/application/mysql
datadir=/data/mysql/data
socket=/tmp/mysql.sock
server_id=12
port=3306
log_bin=/data/mysql/binlog/mysql-bin
character_set_server=utf8
[mysql]
socket=/tmp/mysql.sock

chown -R mysql.mysql /data/

cp /application/mysql/support-files/mysql.server /etc/init.d/mysqld

service mysqld start

mysql

# Create the replication user for the cluster
grant replication slave on *.* to repl@'%' identified by '123';
  • Master-master setup: run on the data2 machine
mysql

# Check the binlog file name and position on the data1 node
show master status;

CHANGE MASTER TO
  MASTER_HOST='data1',
  MASTER_USER='repl',
  MASTER_PASSWORD='123',
  MASTER_PORT=3306,
  MASTER_LOG_FILE='mysql-bin.000001',
  MASTER_LOG_POS=437;
  
# Start replication
start slave;

# Check replication status
show slave status\G
  • Master-master setup: run on the data1 machine
mysql

# Check the binlog file name and position on the data2 node
show master status;

CHANGE MASTER TO
  MASTER_HOST='data2',
  MASTER_USER='repl',
  MASTER_PASSWORD='123',
  MASTER_PORT=3306,
  MASTER_LOG_FILE='mysql-bin.000001',
  MASTER_LOG_POS=437;
  
# Start replication
start slave;

# Check replication status
show slave status\G




===========================================
If replication fails or errors out, run the following cleanup:

stop slave;

reset slave;
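
A quick sanity check on either node is to verify that both replication threads report Yes:

mysql -e "show slave status\G" | grep -E "Slave_(IO|SQL)_Running:"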

RabbitMQ Cluster

  • data1 and data2 machines
yum install -y centos-release-openstack-train

yum install -y rabbitmq-server

systemctl enable --now rabbitmq-server

rabbitmqctl add_user openstack 000000

rabbitmqctl set_permissions openstack ".*" ".*" ".*"

rabbitmq-plugins enable rabbitmq_management
  • data2 machine
scp data1:/var/lib/rabbitmq/.erlang.cookie /var/lib/rabbitmq/

systemctl restart rabbitmq-server.service

# 1. Stop the application
rabbitmqctl stop_app
# 2. Reset the node state
rabbitmqctl reset
# 3. Join the cluster
rabbitmqctl join_cluster rabbit@data1
# 4. Start the application
rabbitmqctl start_app
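
Verify the cluster membership on either node:

# Both rabbit@data1 and rabbit@data2 should appear under running_nodes
rabbitmqctl cluster_status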

memcached

  • data1 and data2 machines
yum install memcached -y

sed -i "s/127.0.0.1/0.0.0.0/g" /etc/sysconfig/memcached

systemctl enable --now memcached
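
A minimal reachability check, assuming nc is available (yum install -y nmap-ncat):

# Each node should answer with its runtime statistics
printf 'stats\r\nquit\r\n' | nc 10.0.0.14 11211 | head -n 5
printf 'stats\r\nquit\r\n' | nc 10.0.0.15 11211 | head -n 5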

Configure HAProxy High Availability for the data Machines

  • Configure haproxy1 as follows
    • haproxy2 stays unconfigured for now; finish haproxy1 first and simply copy the config over later
  • The HAProxy health-check options are detailed below:
check        #Health-check the specified real server; if omitted, checks are disabled. "check" with no further options still enables checking
             #By default the check is a periodic TCP connection to the backend server's IP and port; note a port must be specified for health checks to work
  addr <IP>    #Optional dedicated health-check IP, e.g. a separate management segment to keep probe traffic off the business network
  port <num>   #Port to use for health checks
  inter <num>  #Interval between checks, default 2000 ms (= 2 s)
  fall <num>   #Consecutive failed checks before the server is taken offline, default 3
  rise <num>   #Consecutive successful checks before an offline server is brought back online, default 2
  • haproxy1 machine
vim /etc/haproxy/haproxy.cfg
global
   maxconn 100000
   chroot /apps/haproxy
   stats socket /var/lib/haproxy/haproxy.sock mode 600 level admin
   #uid 99
   #gid 99
   user haproxy
   group haproxy
   daemon
   # nbproc 4
   # cpu-map 1 0
   # cpu-map 2 1
   # cpu-map 3 2
   # cpu-map 4 3
   pidfile /var/lib/haproxy/haproxy.pid
   log 127.0.0.1 local2 info

defaults
   option http-keep-alive
   option forwardfor
   maxconn 100000
   mode http
   timeout connect 300000ms
   timeout client 300000ms
   timeout server 300000ms

listen stats
   mode http
   bind 0.0.0.0:9999
   stats enable
   log global
   stats uri    /haproxy-status
   stats auth   admin:123456

listen mysql
   bind 10.0.0.100:3306
   mode tcp
   log global
   balance leastconn
   server data1 10.0.0.14:3306 check inter 3000 fall 2 rise 5
   server data2 10.0.0.15:3306 check inter 3000 fall 2 rise 5

listen rabbitmq
   bind 10.0.0.100:5672
   mode tcp
   log global
   balance leastconn
   server data1 10.0.0.14:5672 check inter 3000 fall 2 rise 5
   server data2 10.0.0.15:5672 check inter 3000 fall 2 rise 5

listen rabbitmq_web
   bind 10.0.0.100:15672
   mode http
   log global
   balance source
   server data1 10.0.0.14:15672 check inter 3000 fall 2 rise 5
   server data2 10.0.0.15:15672 check inter 3000 fall 2 rise 5

listen memcached
   bind 10.0.0.100:11211
   mode tcp
   log global
   balance source
   server data1 10.0.0.14:11211 check inter 3000 fall 2 rise 5
   server data2 10.0.0.15:11211 check inter 3000 fall 2 rise 5
   



# Gracefully reload HAProxy
systemctl reload haproxy.service
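
With the proxies reloaded, a quick sketch of testing MySQL through the VIP from a data node, using the repl account created earlier:

# server_id should vary between 6 (data1) and 12 (data2) across connections
for i in 1 2 3 4; do mysql -h 10.0.0.100 -urepl -p123 -e "select @@server_id;" 2>/dev/null; done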

Chrony Deployment

  • All machines
# Install the time service
yum install -y chrony
  • data1 machine
vim /etc/chrony.conf 
server ntp6.aliyun.com iburst
allow all
local stratum 10

systemctl restart chronyd
clock -w
  • All machines except data1
vim /etc/chrony.conf
server data1 iburst

systemctl restart chronyd
clock -w
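
Verify time synchronization on any client node:

# data1 should be listed as the selected source (marked ^*)
chronyc sources -v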

Keystone Deployment

  • data1 machine
mysql

CREATE DATABASE keystone;

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone123';
  • controller1 machine
# Install the services
yum install -y centos-release-openstack-train

yum install -y crudini python-openstackclient openstack-selinux

yum install -y openstack-keystone httpd mod_wsgi python2-PyMySQL python-memcached

# On CentOS 8 or other environments, install these packages instead
python3-PyMySQL
python3-mod_wsgi
#########################


# Back up the file, then strip comments and blank lines
cp /etc/keystone/keystone.conf{,.bak}

grep -Ev "^$|#" /etc/keystone/keystone.conf.bak > /etc/keystone/keystone.conf



# Configure the keystone file with crudini
crudini --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:keystone123@openstack.vip.cn/keystone

crudini --set /etc/keystone/keystone.conf token provider fernet



# Sync the database
su -s /bin/sh -c "keystone-manage db_sync" keystone



# Initialize the Fernet key repositories (the options allow running keystone under a different OS user/group)
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone




# Bootstrap the identity service
keystone-manage bootstrap --bootstrap-password 000000 \
  --bootstrap-admin-url http://openstack.vip.cn:5000/v3/ \
  --bootstrap-internal-url http://openstack.vip.cn:5000/v3/ \
  --bootstrap-public-url http://openstack.vip.cn:5000/v3/ \
  --bootstrap-region-id RegionOne


# Configure the ServerName option to reference the controller node
echo "ServerName controller1" >> /etc/httpd/conf/httpd.conf



# Create a link to the /usr/share/keystone/wsgi-keystone.conf file
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/


# Enable at boot and start the service
systemctl enable --now httpd.service
  • haproxy1 machine: configure the keystone proxy
vim /etc/haproxy/haproxy.cfg
''''''
listen keystone
   bind 10.0.0.100:5000
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:5000 check inter 3000 fall 2 rise 5
   # controller2 HA is not set up yet; leave this disabled and uncomment once it is configured
   #server controller2 10.0.0.11:5000 check inter 3000 fall 2 rise 5
   
systemctl reload haproxy.service 
  • controller1 machine
cat > /etc/keystone/admin-openrc.sh << EOF
#!/bin/bash
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=000000
export OS_AUTH_URL=http://openstack.vip.cn:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF



# Load the admin credentials, then create the service project
source /etc/keystone/admin-openrc.sh

openstack project create --domain default --description "Service Project" service

# Verify
openstack token issue

Glance Deployment

  • data1 machine
mysql

CREATE DATABASE glance;

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance123';
  • controller1 machine
# Create the glance user
openstack user create --domain default --password glance glance



# Add the admin role to the glance user in the service project
openstack role add --project service --user glance admin



# Create the glance service
openstack service create --name glance --description "OpenStack Image" image



# Create the glance service API endpoints
openstack endpoint create --region RegionOne image public http://openstack.vip.cn:9292

openstack endpoint create --region RegionOne image internal http://openstack.vip.cn:9292

openstack endpoint create --region RegionOne image admin http://openstack.vip.cn:9292



# Install the glance service
yum install -y openstack-glance



# Back up the file and strip comments
cp /etc/glance/glance-api.conf{,.bak}

grep -Ev "^$|#" /etc/glance/glance-api.conf.bak > /etc/glance/glance-api.conf



# Configure the glance config file
crudini --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:glance123@openstack.vip.cn/glance

crudini --set /etc/glance/glance-api.conf keystone_authtoken www_authenticate_uri http://openstack.vip.cn:5000

crudini --set /etc/glance/glance-api.conf keystone_authtoken auth_url http://openstack.vip.cn:5000

crudini --set /etc/glance/glance-api.conf keystone_authtoken memcached_servers openstack.vip.cn:11211

crudini --set /etc/glance/glance-api.conf keystone_authtoken auth_type password

crudini --set /etc/glance/glance-api.conf keystone_authtoken project_domain_name Default

crudini --set /etc/glance/glance-api.conf keystone_authtoken user_domain_name Default

crudini --set /etc/glance/glance-api.conf keystone_authtoken project_name service

crudini --set /etc/glance/glance-api.conf keystone_authtoken username glance

crudini --set /etc/glance/glance-api.conf keystone_authtoken password glance

crudini --set /etc/glance/glance-api.conf paste_deploy flavor keystone

crudini --set /etc/glance/glance-api.conf glance_store stores file,http

crudini --set /etc/glance/glance-api.conf glance_store default_store file

crudini --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir /var/lib/glance/images/



# Sync the database
su -s /bin/sh -c "glance-manage db_sync" glance



# Enable at boot and start the service
systemctl enable --now openstack-glance-api.service
  • haproxy1 machine
vim /etc/haproxy/haproxy.cfg
'''''''
listen glance
   bind 10.0.0.100:9292
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:9292 check inter 3000 fall 2 rise 5
   #server controller2 10.0.0.11:9292 check inter 3000 fall 2 rise 5
   


systemctl reload haproxy.service
  • controller1 machine: verify
# Download the image
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img


# Upload the image
glance image-create --name "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare  --visibility public

# Verify
openstack image list

Placement Deployment

  • data1 machine
    • Create the placement database and user
mysql

CREATE DATABASE placement;

GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placement123';
  • controller1 machine
# Create the user
openstack user create --domain default --password placement placement



# Add the placement user to the service project with the admin role
openstack role add --project service --user placement admin



# Create the Placement API
openstack service create --name placement --description "Placement API" placement

openstack endpoint create --region RegionOne placement public http://openstack.vip.cn:8778

openstack endpoint create --region RegionOne placement internal http://openstack.vip.cn:8778

openstack endpoint create --region RegionOne placement admin http://openstack.vip.cn:8778



# Install the service
yum install -y openstack-placement-api



# Back up the file and strip comments
cp /etc/placement/placement.conf{,.bak}

grep -Ev "^$|#" /etc/placement/placement.conf.bak  > /etc/placement/placement.conf



# Configure the placement config file
crudini --set /etc/placement/placement.conf placement_database connection mysql+pymysql://placement:placement123@openstack.vip.cn/placement

crudini --set /etc/placement/placement.conf api auth_strategy keystone

crudini --set /etc/placement/placement.conf keystone_authtoken auth_url http://openstack.vip.cn:5000/v3

crudini --set /etc/placement/placement.conf keystone_authtoken memcached_servers openstack.vip.cn:11211

crudini --set /etc/placement/placement.conf keystone_authtoken auth_type password

crudini --set /etc/placement/placement.conf keystone_authtoken project_domain_name Default

crudini --set /etc/placement/placement.conf keystone_authtoken user_domain_name Default

crudini --set /etc/placement/placement.conf keystone_authtoken project_name service

crudini --set /etc/placement/placement.conf keystone_authtoken username placement

crudini --set /etc/placement/placement.conf keystone_authtoken password placement



# Sync the database
su -s /bin/sh -c "placement-manage db sync" placement



# Enable access to the Placement API
cat >> /etc/httpd/conf.d/00-placement-api.conf << EOF
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
EOF



# Restart httpd to start placement
systemctl restart httpd
  • haproxy1 node
vim /etc/haproxy/haproxy.cfg
'''''
listen placement
   bind 10.0.0.100:8778
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:8778 check inter 3000 fall 2 rise 5
   #server controller2 10.0.0.11:8778 check inter 3000 fall 2 rise 5
   

# Gracefully reload the HAProxy service
systemctl reload haproxy.service
  • controller1 machine: verify the placement service
placement-status upgrade check

Nova Controller Deployment

  • data1 machine
mysql

CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
  • controller1 machine
# Create the nova user
openstack user create --domain default --password nova nova

# Add the admin role to the nova user in the service project
openstack role add --project service --user nova admin

# Create the nova API
openstack service create --name nova --description "OpenStack Compute" compute

openstack endpoint create --region RegionOne compute public http://openstack.vip.cn:8774/v2.1

openstack endpoint create --region RegionOne compute internal http://openstack.vip.cn:8774/v2.1

openstack endpoint create --region RegionOne compute admin http://openstack.vip.cn:8774/v2.1



# Install the services
yum install -y openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler


# Back up the file and strip comments
cp /etc/nova/nova.conf{,.bak}
grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf



# Configure the nova file
crudini --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata

crudini --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova123@openstack.vip.cn/nova_api

crudini --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova123@openstack.vip.cn/nova

crudini --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:000000@openstack.vip.cn:5672/

crudini --set /etc/nova/nova.conf api auth_strategy keystone

crudini --set /etc/nova/nova.conf keystone_authtoken www_authenticate_uri http://openstack.vip.cn:5000/

crudini --set /etc/nova/nova.conf keystone_authtoken auth_url http://openstack.vip.cn:5000/

crudini --set /etc/nova/nova.conf keystone_authtoken memcached_servers openstack.vip.cn:11211

crudini --set /etc/nova/nova.conf keystone_authtoken auth_type password

crudini --set /etc/nova/nova.conf keystone_authtoken project_domain_name Default

crudini --set /etc/nova/nova.conf keystone_authtoken user_domain_name Default

crudini --set /etc/nova/nova.conf keystone_authtoken project_name service

crudini --set /etc/nova/nova.conf keystone_authtoken username nova

crudini --set /etc/nova/nova.conf keystone_authtoken password nova

crudini --set /etc/nova/nova.conf DEFAULT my_ip 10.0.0.10

crudini --set /etc/nova/nova.conf DEFAULT use_neutron true

crudini --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver

crudini --set /etc/nova/nova.conf vnc enabled true

crudini --set /etc/nova/nova.conf vnc server_listen ' $my_ip'

crudini --set /etc/nova/nova.conf vnc server_proxyclient_address ' $my_ip'

crudini --set /etc/nova/nova.conf glance api_servers http://openstack.vip.cn:9292

crudini --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp

crudini --set /etc/nova/nova.conf placement region_name RegionOne

crudini --set /etc/nova/nova.conf placement project_domain_name Default

crudini --set /etc/nova/nova.conf placement project_name service

crudini --set /etc/nova/nova.conf placement auth_type password

crudini --set /etc/nova/nova.conf placement user_domain_name Default

crudini --set /etc/nova/nova.conf placement auth_url http://openstack.vip.cn:5000/v3

crudini --set /etc/nova/nova.conf placement username placement

crudini --set /etc/nova/nova.conf placement password placement



# Sync the databases
su -s /bin/sh -c "nova-manage api_db sync" nova

su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova

su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova

su -s /bin/sh -c "nova-manage db sync" nova

su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova



# Enable at boot and start the services
systemctl enable --now openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  • haproxy1 node
vim /etc/haproxy/haproxy.cfg
'''''
listen nova
   bind 10.0.0.100:8774
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:8774 check inter 3000 fall 2 rise 5
   #server controller2 10.0.0.11:8774 check inter 3000 fall 2 rise 5

listen nova-metadata
   bind 10.0.0.100:8775
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:8775 check inter 3000 fall 2 rise 5
   #server controller2 10.0.0.11:8775 check inter 3000 fall 2 rise 5

listen nova-vnc
   bind 10.0.0.100:6080
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:6080 check inter 3000 fall 2 rise 5
   #server controller2 10.0.0.11:6080 check inter 3000 fall 2 rise 5
   


systemctl reload haproxy.service

Nova Compute Deployment

  • compute1 machine
# Install the repo and the nova service
yum install -y centos-release-openstack-train

yum install -y openstack-nova-compute crudini



# Back up the file and strip comments
cp /etc/nova/nova.conf{,.bak}

grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf



# Configure the nova file
crudini --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata

crudini --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:000000@openstack.vip.cn

crudini --set /etc/nova/nova.conf api auth_strategy keystone

crudini --set /etc/nova/nova.conf keystone_authtoken www_authenticate_uri http://openstack.vip.cn:5000/

crudini --set /etc/nova/nova.conf keystone_authtoken auth_url http://openstack.vip.cn:5000/

crudini --set /etc/nova/nova.conf keystone_authtoken memcached_servers openstack.vip.cn:11211

crudini --set /etc/nova/nova.conf keystone_authtoken auth_type password

crudini --set /etc/nova/nova.conf keystone_authtoken project_domain_name Default

crudini --set /etc/nova/nova.conf keystone_authtoken user_domain_name Default

crudini --set /etc/nova/nova.conf keystone_authtoken project_name service

crudini --set /etc/nova/nova.conf keystone_authtoken username nova

crudini --set /etc/nova/nova.conf keystone_authtoken password nova

crudini --set /etc/nova/nova.conf DEFAULT my_ip 10.0.0.12

crudini --set /etc/nova/nova.conf DEFAULT use_neutron true

crudini --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver

crudini --set /etc/nova/nova.conf vnc enabled true

crudini --set /etc/nova/nova.conf vnc server_listen 0.0.0.0

crudini --set /etc/nova/nova.conf vnc server_proxyclient_address ' $my_ip'

crudini --set /etc/nova/nova.conf vnc novncproxy_base_url http://openstack.vip.cn:6080/vnc_auto.html

crudini --set /etc/nova/nova.conf glance api_servers http://openstack.vip.cn:9292

crudini --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp

crudini --set /etc/nova/nova.conf placement region_name RegionOne

crudini --set /etc/nova/nova.conf placement project_domain_name Default

crudini --set /etc/nova/nova.conf placement project_name service

crudini --set /etc/nova/nova.conf placement auth_type password

crudini --set /etc/nova/nova.conf placement user_domain_name Default

crudini --set /etc/nova/nova.conf placement auth_url http://openstack.vip.cn:5000/v3

crudini --set /etc/nova/nova.conf placement username placement

crudini --set /etc/nova/nova.conf placement password placement

crudini --set /etc/nova/nova.conf libvirt hw_machine_type x86_64=pc-i440fx-rhel7.2.0

crudini --set /etc/nova/nova.conf libvirt cpu_mode host-passthrough

# Enable at boot and start the services
systemctl enable --now libvirtd.service openstack-nova-compute.service
  • controller1 machine
# Configure automatic host discovery, once every five minutes
crudini --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300

# Restart to apply the configuration
systemctl restart openstack-nova-api

# List the registered compute nodes
openstack compute service list --service nova-compute

# Host discovery
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
  • controller1 machine: verify nova
openstack compute service list

Neutron Controller Deployment

  • data1 machine
mysql

CREATE DATABASE neutron;

GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron123';
  • controller1 machine
# Create the neutron user
openstack user create --domain default --password neutron neutron



# Add the admin role to the neutron user
openstack role add --project service --user neutron admin



# Create the neutron service and API endpoints
openstack service create --name neutron --description "OpenStack Networking" network

openstack endpoint create --region RegionOne network public http://openstack.vip.cn:9696

openstack endpoint create --region RegionOne network internal http://openstack.vip.cn:9696

openstack endpoint create --region RegionOne network admin http://openstack.vip.cn:9696



# Install the neutron services
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables



# Back up the file and strip comments
cp  /etc/neutron/neutron.conf{,.bak}

grep -Ev "^$|#" /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf




# Configure the neutron file
crudini --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:neutron123@openstack.vip.cn/neutron

crudini --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2

crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins router

crudini --set /etc/neutron/neutron.conf DEFAULT allow_overlapping_ips true

crudini --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:000000@openstack.vip.cn

crudini --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone

crudini --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://openstack.vip.cn:5000

crudini --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://openstack.vip.cn:5000

crudini --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers openstack.vip.cn:11211

crudini --set /etc/neutron/neutron.conf keystone_authtoken auth_type password

crudini --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default

crudini --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default

crudini --set /etc/neutron/neutron.conf keystone_authtoken project_name service

crudini --set /etc/neutron/neutron.conf keystone_authtoken username neutron

crudini --set /etc/neutron/neutron.conf keystone_authtoken password neutron

crudini --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes true

crudini --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes true

crudini --set /etc/neutron/neutron.conf nova auth_url http://openstack.vip.cn:5000

crudini --set /etc/neutron/neutron.conf nova auth_type password

crudini --set /etc/neutron/neutron.conf nova project_domain_name default

crudini --set /etc/neutron/neutron.conf nova user_domain_name default

crudini --set /etc/neutron/neutron.conf nova region_name RegionOne

crudini --set /etc/neutron/neutron.conf nova project_name service

crudini --set /etc/neutron/neutron.conf nova username nova

crudini --set /etc/neutron/neutron.conf nova password nova

crudini --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp



# Configure the ml2_conf file
cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}

grep -Ev "^$|#" /etc/neutron/plugins/ml2/ml2_conf.ini.bak > /etc/neutron/plugins/ml2/ml2_conf.ini

crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan,vxlan

crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan

crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge,l2population

crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security

crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks extnal

crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges 1:1000

crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset true



# Configure the linuxbridge_agent file
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}

grep -Ev "^$|#" /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini

crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings extnal:eth0

crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan true

crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 10.0.0.10

crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population true

crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true

crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver



# Configure kernel parameters to ensure the Linux kernel supports bridge filtering
cat >> /etc/sysctl.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

modprobe br_netfilter
sysctl -p
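
Note that modprobe does not persist across reboots; a minimal way to load the module at boot, assuming systemd's modules-load mechanism (standard on CentOS 7):

echo br_netfilter > /etc/modules-load.d/br_netfilter.conf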


# Configure the layer-3 l3_agent file
cp /etc/neutron/l3_agent.ini{,.bak}

grep -Ev "^$|#" /etc/neutron/l3_agent.ini.bak > /etc/neutron/l3_agent.ini

crudini --set /etc/neutron/l3_agent.ini DEFAULT interface_driver linuxbridge



# Configure the dhcp_agent file
cp  /etc/neutron/dhcp_agent.ini{,.bak}

grep -Ev "^$|#" /etc/neutron/dhcp_agent.ini.bak > /etc/neutron/dhcp_agent.ini

crudini --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver linuxbridge

crudini --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq

crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata true



# Configure the metadata_agent file
cp /etc/neutron/metadata_agent.ini{,.bak}

grep -Ev "^$|#" /etc/neutron/metadata_agent.ini.bak > /etc/neutron/metadata_agent.ini

crudini --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_host openstack.vip.cn

crudini --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret angteacher




# Configure the neutron settings in the nova file
crudini --set /etc/nova/nova.conf neutron auth_url http://openstack.vip.cn:5000

crudini --set /etc/nova/nova.conf neutron auth_type password

crudini --set  /etc/nova/nova.conf neutron project_domain_name default

crudini --set  /etc/nova/nova.conf neutron user_domain_name default

crudini --set  /etc/nova/nova.conf neutron region_name RegionOne

crudini --set  /etc/nova/nova.conf neutron project_name service

crudini --set  /etc/nova/nova.conf neutron username neutron

crudini --set  /etc/nova/nova.conf neutron password neutron

crudini --set  /etc/nova/nova.conf neutron service_metadata_proxy true

crudini --set  /etc/nova/nova.conf neutron metadata_proxy_shared_secret angteacher

# The networking service init scripts expect a symbolic link /etc/neutron/plugin.ini pointing to the ML2 plug-in config file
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini



# Sync the database
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron



# Restart the nova service so the neutron settings take effect
systemctl restart openstack-nova-api.service



# Enable at boot and start the neutron services
systemctl enable --now neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service
  • haproxy1 machine
vim /etc/haproxy/haproxy.cfg
''''
listen neutron
   bind 10.0.0.100:9696
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:9696 check inter 3000 fall 2 rise 5
   #server controller2 10.0.0.11:9696 check inter 3000 fall 2 rise 5



systemctl reload haproxy.service

Neutron Compute Deployment

  • compute1 node
# Install the services
yum install -y openstack-neutron-linuxbridge ebtables ipset



# Configure the neutron file
cp /etc/neutron/neutron.conf{,.bak}

grep -Ev "^$|#" /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf

crudini --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:000000@openstack.vip.cn

crudini --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone

crudini --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://openstack.vip.cn:5000

crudini --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://openstack.vip.cn:5000

crudini --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers openstack.vip.cn:11211

crudini --set /etc/neutron/neutron.conf keystone_authtoken auth_type password

crudini --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default

crudini --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default

crudini --set /etc/neutron/neutron.conf keystone_authtoken project_name service

crudini --set /etc/neutron/neutron.conf keystone_authtoken username neutron

crudini --set /etc/neutron/neutron.conf keystone_authtoken password neutron

crudini --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp



# Configure the linuxbridge_agent file
cp  /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}

grep -Ev "^$|#" /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini

crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings extnal:eth0

crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan true

crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 10.0.0.12

crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population true

crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true

crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver



# Configure kernel parameters to ensure the Linux kernel supports bridge filtering
cat >> /etc/sysctl.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

modprobe br_netfilter
sysctl -p



# Configure the neutron settings in the nova file
crudini --set /etc/nova/nova.conf neutron auth_url http://openstack.vip.cn:5000

crudini --set /etc/nova/nova.conf neutron auth_type password

crudini --set /etc/nova/nova.conf neutron project_domain_name default

crudini --set /etc/nova/nova.conf neutron user_domain_name default

crudini --set /etc/nova/nova.conf neutron region_name RegionOne

crudini --set /etc/nova/nova.conf neutron project_name service

crudini --set /etc/nova/nova.conf neutron username neutron

crudini --set /etc/nova/nova.conf neutron password neutron




# Restart nova to pick up the neutron configuration
systemctl restart openstack-nova-compute.service

# Enable at boot and start the service
systemctl enable --now neutron-linuxbridge-agent.service
  • controller1 machine: verify
openstack network agent list

Dashboard Deployment

  • controller1 machine
# Install the service
yum install -y openstack-dashboard


# Configuration file
vim /etc/openstack-dashboard/local_settings
'''
OPENSTACK_HOST = "controller1"
ALLOWED_HOSTS = ["*"]
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'openstack.vip.cn:11211',
    },
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 3,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
WEBROOT = "/dashboard"
TIME_ZONE = "Asia/Shanghai"
'''



vim /etc/httpd/conf.d/openstack-dashboard.conf
''''
WSGIApplicationGroup %{GLOBAL}
''''



# Restart the service
systemctl restart httpd.service
  • haproxy1 machine
vim /etc/haproxy/haproxy.cfg
listen dashboard
   bind 10.0.0.100:80
   mode tcp
   log global
   balance source
   server controller1 10.0.0.10:80 check inter 3000 fall 2 rise 5
   #server controller2 10.0.0.11:80 check inter 3000 fall 2 rise 5



systemctl reload haproxy.service

Keystone High Availability Deployment

  • controller2 machine
# Install the services
yum install -y centos-release-openstack-train

yum install -y crudini python-openstackclient openstack-selinux

yum install -y openstack-keystone httpd mod_wsgi python2-PyMySQL python-memcached
  • controller1 machine
    • Since the database and its data are already initialized and in place, only the config files are needed to bring the service up
cd /etc/keystone/

# Pack up the config files
tar cvf keystone.tar.gz ./*

scp keystone.tar.gz root@controller2:/etc/keystone/
  • controller2 machine
# Unpack the config files
tar xvf /etc/keystone/keystone.tar.gz -C /etc/keystone/



# Configure the ServerName option to reference the controller node
echo "ServerName controller2" >> /etc/httpd/conf/httpd.conf



# Create a link to the /usr/share/keystone/wsgi-keystone.conf file
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/


# Enable at boot and start the service
systemctl enable --now httpd.service
  • haproxy1 machine
    • HAProxy tracks the health of the proxied controller nodes automatically; if one crashes it is taken offline and requests are scheduled to the healthy machine
# Configure the keystone proxy for the controller2 machine
vim /etc/haproxy/haproxy.cfg
'''
listen keystone
   bind 10.0.0.100:5000
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:5000 check inter 3000 fall 2 rise 5
   server controller2 10.0.0.11:5000 check inter 3000 fall 2 rise 5
'''


# Graceful reload
systemctl reload haproxy.service

Glance High Availability Deployment

  • controller2 machine
# Install the service
yum install -y openstack-glance
  • controller1 machine
cd /etc/glance/

# Pack up the config files
tar cvf glance.tar.gz ./*

scp glance.tar.gz root@controller2:/etc/glance/
  • controller2 machine
tar xvf /etc/glance/glance.tar.gz -C /etc/glance/

# Enable at boot and start the service
systemctl enable --now openstack-glance-api.service
  • haproxy1 machine
vim /etc/haproxy/haproxy.cfg
listen glance
   bind 10.0.0.100:9292
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:9292 check inter 3000 fall 2 rise 5
   server controller2 10.0.0.11:9292 check inter 3000 fall 2 rise 5
   


systemctl reload haproxy.service
  • Configure shared storage for glance
    • Without it, image data becomes inconsistent across the controller nodes
  • data1 machine
yum -y install nfs-utils

mkdir /data/glance -p

vim /etc/exports
/data/glance *(rw,no_root_squash)

chown -R 161.161 /data/glance/

systemctl enable --now nfs
  • controller1 and controller2 machines
# Mount
mount -t nfs data1:/data/glance /var/lib/glance/images

# Configure a persistent mount
vim /etc/fstab
data1:/data/glance /var/lib/glance/images nfs defaults,_netdev 0 0

PS: _netdev defers the mount until the network is up, avoiding long boot times or boot failures.
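
A quick check that both controllers share the same backing store:

# Run on controller1 and controller2; both should show the data1 NFS export
df -h /var/lib/glance/images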

Placement High Availability Deployment

  • controller2 machine
# Install the service
yum install -y openstack-placement-api
  • controller1 machine
cd /etc/placement/

tar cvf placement.tar.gz  ./*

scp placement.tar.gz root@controller2:/etc/placement/
  • controller2 machine
tar xvf /etc/placement/placement.tar.gz -C /etc/placement/

# Enable access to the Placement API
cat >> /etc/httpd/conf.d/00-placement-api.conf << EOF
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
EOF



# Restart httpd to start placement
systemctl restart httpd
  • haproxy1 machine
vim /etc/haproxy/haproxy.cfg
'''
listen placement
   bind 10.0.0.100:8778
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:8778 check inter 3000 fall 2 rise 5
   server controller2 10.0.0.11:8778 check inter 3000 fall 2 rise 5
'''


systemctl reload haproxy.service

Nova High Availability Deployment

  • controller2 machine
yum install -y openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler
  • controller1 machine
cd /etc/nova/

tar cvf nova.tar.gz ./*

scp nova.tar.gz root@controller2:/etc/nova/
  • controller2 machine
tar xvf /etc/nova/nova.tar.gz -C /etc/nova/

sed -i "s/10.0.0.10/10.0.0.11/g" /etc/nova/nova.conf

# Enable at boot and start the services
systemctl enable --now openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  • haproxy1 node
vim /etc/haproxy/haproxy.cfg
'''
listen nova
   bind 10.0.0.100:8774
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:8774 check inter 3000 fall 2 rise 5
   server controller2 10.0.0.11:8774 check inter 3000 fall 2 rise 5

listen nova-metadata
   bind 10.0.0.100:8775
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:8775 check inter 3000 fall 2 rise 5
   server controller2 10.0.0.11:8775 check inter 3000 fall 2 rise 5

listen nova-vnc
   bind 10.0.0.100:6080
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:6080 check inter 3000 fall 2 rise 5
   server controller2 10.0.0.11:6080 check inter 3000 fall 2 rise 5
'''


systemctl reload haproxy.service

Neutron High Availability Deployment

  • controller2 machine
# Install the services
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables

# Configure kernel parameters to ensure the Linux kernel supports bridge filtering
cat >> /etc/sysctl.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

modprobe br_netfilter
sysctl -p
  • controller1 machine
cd /etc/neutron/

tar cvf neutron.tar.gz ./*

scp neutron.tar.gz root@controller2:/etc/neutron/
  • controller2 machine
# Unpack the files
tar xvf /etc/neutron/neutron.tar.gz -C /etc/neutron/


# Update the configuration
sed -i "s/10.0.0.10/10.0.0.11/g" /etc/neutron/plugins/ml2/linuxbridge_agent.ini


# Restart the nova service so the neutron settings take effect
systemctl restart openstack-nova-api.service



# Enable at boot and start the neutron services
systemctl enable --now neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service
  • haproxy1 machine
vim /etc/haproxy/haproxy.cfg
'''
listen neutron
   bind 10.0.0.100:9696
   mode tcp
   log global
   balance random
   server controller1 10.0.0.10:9696 check inter 3000 fall 2 rise 5
   server controller2 10.0.0.11:9696 check inter 3000 fall 2 rise 5
'''



systemctl reload haproxy.service

Dashboard High Availability Deployment

  • controller2 machine
# Install the service
yum install -y openstack-dashboard


# Copy the config files
scp root@controller1:/etc/openstack-dashboard/local_settings /etc/openstack-dashboard/local_settings

scp root@controller1:/etc/httpd/conf.d/openstack-dashboard.conf /etc/httpd/conf.d/openstack-dashboard.conf


# Update the config file
sed -i "s/controller1/controller2/g" /etc/openstack-dashboard/local_settings


# Restart httpd to load the service
systemctl restart httpd
  • haproxy1 machine
vim /etc/haproxy/haproxy.cfg
''''
listen dashboard
   bind 10.0.0.100:80
   mode tcp
   log global
   balance source
   server controller1 10.0.0.10:80 check inter 3000 fall 2 rise 5
   server controller2 10.0.0.11:80 check inter 3000 fall 2 rise 5
''''


systemctl reload haproxy.service
  • controller2 machine: verify
source /etc/keystone/admin-openrc.sh


# All services should be in the up state
nova service-list


# All agents should show the :-) state
neutron agent-list

Add a Compute Node

nova

  • compute2 machine
# Install the repo and the nova service
yum install -y centos-release-openstack-train

yum install -y openstack-nova-compute crudini
  • compute1 machine
cd /etc/nova/

tar cvf nova-compute.tar.gz ./*

scp nova-compute.tar.gz root@compute2:/etc/nova/
  • compute2 machine
tar xvf /etc/nova/nova-compute.tar.gz -C /etc/nova/

sed -i "s/10.0.0.12/10.0.0.13/g" /etc/nova/nova.conf

# Enable at boot and start the services
systemctl enable --now libvirtd.service openstack-nova-compute.service
  • Any controller node
    • Host discovery
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

neutron

  • compute2 machine
# Install the services
yum install -y openstack-neutron-linuxbridge ebtables ipset

# Configure kernel parameters to ensure the Linux kernel supports bridge filtering
cat >> /etc/sysctl.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

modprobe br_netfilter
sysctl -p
  • compute1 machine
cd /etc/neutron/

tar cvf neutron-compute.tar.gz ./*

scp neutron-compute.tar.gz root@compute2:/etc/neutron/
  • compute2 machine
tar xvf /etc/neutron/neutron-compute.tar.gz -C /etc/neutron/

sed -i "s/10.0.0.12/10.0.0.13/g" /etc/neutron/plugins/ml2/linuxbridge_agent.ini

# Restart nova to pick up the neutron configuration
systemctl restart openstack-nova-compute.service

# Enable at boot and start the service
systemctl enable --now neutron-linuxbridge-agent.service

Adjust OpenStack Resource Limits

  • Adjust instance, vCPU, and memory limits
  • All controller nodes
vim /etc/nova/nova.conf
[quota]
# Number of instances
instances=99999
# Number of vCPUs
cores=2000
# Memory size (MB)
ram=512000


# Restart to apply
systemctl restart openstack-nova-api.service
  • Modify quotas for networks, security groups, routers, floating IPs, etc. (important: these values cannot be adjusted from the web UI either; they are internal OpenStack limits and must be added on all nodes)
  • All controller nodes
# Check the current quotas
neutron quota-show admin


vim /etc/neutron/neutron.conf
[quotas]
quota_network = 1000
quota_subnet = 1000
quota_port = 5000
quota_driver = neutron.db.quota.driver.DbQuotaDriver
quota_router = 100
quota_floatingip = 1000
quota_security_group = 100
quota_security_group_rule = 1000


# Restart to apply
systemctl restart neutron-server.service
  • Enable quota limits in the web config file
  • All controller nodes
vim /etc/openstack-dashboard/local_settings
'''
'enable_quotas': True,
'''


systemctl restart httpd

OpenStack Tuning Configuration

  • Apply the following settings in /etc/nova/nova.conf on the compute nodes
  • Configure instances to auto-start when the host boots (must add)
[DEFAULT]
resume_guests_state_on_host_boot=true
  • Configure CPU overcommit
# Default is 16, i.e. allow 16 virtual CPUs per physical CPU
[DEFAULT]
cpu_allocation_ratio=16
  • Configure memory overcommit
# Allow virtual memory up to 1.5x physical memory
[DEFAULT]
ram_allocation_ratio=1.5
  • Configure disk overcommit (not recommended; it can lead to data loss)
# Avoid overcommitting disk; it may cause data loss
[DEFAULT]
disk_allocation_ratio=1.0
  • Configure reserved disk space
# Reserves part of the disk space for the host system
[DEFAULT]
reserved_host_disk_mb=20480
  • Configure memory reserved for the host
# Reserves a certain amount of memory for the host system
[DEFAULT]
reserved_host_memory_mb=512
  • Configure dynamic flavor resizing
# Instances sometimes need different memory, CPU, or disk after creation, so allow resizing later on
[DEFAULT]
allow_resize_to_same_host=true		# allow resizing
  • Restart the service to apply
systemctl restart openstack-nova-compute.service

OpenStack Cross-Host Migration / Flavor Resize

  • During a host migration, the source host copies the instance's directory over to the destination host, which then starts the instance there
  • The flavor-resize process works the same way as host migration
  • Enable login for the nova user (all compute nodes)
usermod nova -s /bin/bash
  • Set a login password for the nova user (all compute nodes)
echo 000000 | passwd --stdin nova
  • Generate key pairs and configure passwordless login (all compute nodes)
su - nova

ssh-keygen		# just press Enter through all prompts

ssh-copy-id nova@compute1

ssh-copy-id nova@compute2
  • Configuration (all compute nodes)
# Edit the config files
vim /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
auth_tcp = "none"

vim /etc/sysconfig/libvirtd
LIBVIRTD_ARGS="--listen"	# uncomment this line

systemctl restart libvirtd.service openstack-nova-compute.service

# In the private cloud dashboard, click instance live migration and tick "allow disk over-commit" and "block device migration"

# If that fails, run this on both nodes to relax the memory limit, then restart the services
echo 1 > /proc/sys/vm/overcommit_memory
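
Note that this echo does not survive a reboot; a minimal sketch of persisting it via sysctl:

echo "vm.overcommit_memory = 1" >> /etc/sysctl.conf
sysctl -p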
  • Complete instance live/cold migration and instance resizing on your own

HAProxy High Availability

  • haproxy2 machine
scp haproxy1:/etc/haproxy/haproxy.cfg /etc/haproxy/
  • haproxy1 machine
    • Float the VIP over so the haproxy2 node can restart its haproxy service
systemctl restart keepalived.service
  • haproxy2 machine
# Stop haproxy; keepalived's check script restarts it within seconds, loading the new config
systemctl stop haproxy.service

OpenStack-API

Install Python 3 and the Required Modules on a Controller Node

# Install python3
yum install -y python3

# Install the module the API scripts need
pip3 install requests -i https://pypi.douban.com/simple


mkdir openstack-api


cd openstack-api

Obtain a Token Dynamically

  • All API resource creation requires token authentication
vim auth.py
#!/usr/bin/python3
import json, requests

# Define the global request settings
headers = {}
headers["Content-Type"] = "application/json"
os_auth_url = 'http://10.0.0.100:5000/v3/auth/tokens'  # fill in the VIP / controller address

# Request body for password authentication
body = {
    "auth": {                                              # authorization
        "identity": {                                      # identity
            "methods": ["password"],                       # auth method: password
            "password": {
                "user": {
                    "id": "b09096c15b814053bf25264a3dbe5366",  # the admin user's ID on this deployment
                    "password": "000000"
                }
            }
        },
        "scope": {                                          # scope
            "project": {
                "id": "93ad92c3e318431099327595bf689acc"    # the admin project's ID on this deployment
            }
        }
    }
}


# Fetch the token value
def get_token():
    response = requests.post(os_auth_url, data=json.dumps(body), headers=headers).headers["X-Subject-Token"]     # dumps serializes the body for the server
    return response
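
The user and project IDs above differ per deployment; a minimal way to look them up and smoke-test the helper, assuming the admin credentials are loaded:

openstack user show admin -f value -c id
openstack project show admin -f value -c id
python3 -c "from auth import get_token; print(get_token())"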

Create a Flavor (Instance Type)

vim flavor.py
#!/usr/bin/python3
import json, requests
from auth import get_token

headers = {}
headers['X-Auth-Token'] = get_token()
url = "http://10.0.0.100:8774/v2.1/flavors"

body = {
    "flavor": {
        "name": "C2_2G_20G",
        "vcpus": 2,
        "ram": 2048,
        "disk": 20,
    }
}

def create_flavor():
    re = requests.post(url, data=json.dumps(body), headers=headers).json()
    print(re)


def main_flavor():
    re = requests.get(url, headers=headers).json()
    name = re['flavors']
    for flavor_name in name:
        print(flavor_name["name"])
        if flavor_name["name"] == body["flavor"]["name"]:
            requests.delete(url + f"/{flavor_name['id']}", headers=headers)
            print('....deleted the flavor with the same name...creating a new one')

    else:
        create_flavor()
        print('Flavor created successfully...')

if __name__ == '__main__':
    main_flavor()

Create Networks

  • VXLAN network
vim network_vxlan.py
#!/usr/bin/python3
import json, requests
from auth import get_token


class networks_vxlan:
    headers = {}
    headers['X-Auth-Token'] = get_token()
    headers['Content-Type'] = 'application/json'
    url = "http://10.0.0.100:9696/v2.0/"
    body = {
        "network": {
            "name": "intnet_vxlan",
            'provider:network_type': 'vxlan',
            'router:external': False,
            'shared': True,

        }
    }

    def intnet_vxlan(self):
        re = requests.post(self.url + "networks", data=json.dumps(self.body), headers=self.headers).json()
        print(re)

    def intsubnet_vxlan(self):
        intnet = requests.get(self.url + "networks", headers=self.headers).json()["networks"]
        for net_id in intnet:
            if net_id['name'] == self.body['network']['name']:
                intnet_id = net_id['id']

        body = {
            "subnet": {
                "name": "intsubnet-vxlan",
                'network_id': intnet_id,
                'cidr': '166.66.66.0/24',
                'gateway_ip': '166.66.66.1',
                'ip_version': 4,
                'dns_nameservers': ['223.6.6.6'],
            }
        }
        re = requests.post(self.url + "subnets", data=json.dumps(body), headers=self.headers).json()
        print(re)

    def main_network(self):
        re = requests.get(self.url + "networks", headers=self.headers).json()
        name = re["networks"]
        for net_name in name:
            print(net_name['name'])
            if net_name['name'] == self.body['network']['name']:
                requests.delete(self.url + f"networks/{net_name['id']}", headers=self.headers)
                print('Deleted the network with the same name....creating a new one...')
        else:
            self.intnet_vxlan()
            self.intsubnet_vxlan()
            print('Network created successfully')


if __name__ == '__main__':
    net = networks_vxlan()
    net.main_network()

  • Flat network
vim network_flat.py
#!/usr/bin/python3
import json, requests
from auth import get_token


class networks_flat:
    headers = {}
    headers['X-Auth-Token'] = get_token()
    headers['Content-Type'] = 'application/json'
    url = "http://10.0.0.100:9696/v2.0/"
    body = {
        "network": {
            "name": "extnet_flat",
            'provider:network_type': 'flat',
            'provider:physical_network': 'extnal',
            'router:external': True,
            'shared': False,
        }
    }

    def extnet_flat(self):
        re = requests.post(self.url + "networks", data=json.dumps(self.body), headers=self.headers).json()
        print(re)

    def extsubnet_flat(self):
        intnet = requests.get(self.url + "networks", headers=self.headers).json()["networks"]
        for net_id in intnet:
            if net_id['name'] == self.body['network']['name']:
                intnet_id = net_id['id']
        body = {
            "subnet": {
                "name": "extsubnet_flat",
                'network_id': intnet_id,
                'cidr': '10.0.0.0/24',
                'gateway_ip': '10.0.0.254',
                'ip_version': 4,
                'dns_nameservers': ['223.6.6.6'],
                'allocation_pools': [
                    {
                        'start': '10.0.0.30',
                        'end': '10.0.0.240'
                    }
                ]
            },
        }
        re = requests.post(self.url + "subnets", data=json.dumps(body), headers=self.headers).json()
        print(re)

    def main_network(self):
        re = requests.get(self.url + "networks", headers=self.headers).json()
        name = re["networks"]
        for net_name in name:
            print(net_name['name'])
            if net_name['name'] == self.body['network']['name']:
                requests.delete(self.url + f"networks/{net_name['id']}", headers=self.headers)
                print('Deleted the network with the same name....creating a new one...')
        else:
            self.extnet_flat()
            self.extsubnet_flat()
            print('Network created successfully')


if __name__ == '__main__':
    net = networks_flat()
    net.main_network()

Create a Router

vim router.py
#!/usr/bin/python3
import json, requests
from auth import get_token
from network_flat import networks_flat


class router:
    headers = {}
    headers['X-Auth-Token'] = get_token()
    url = "http://10.0.0.100:9696/v2.0/"

    re = requests.get(url + "networks", headers=headers).json()
    name = re['networks']
    for net_id in name:
        if net_id['name'] == networks_flat.body['network']['name']:
            ext_flat = net_id['id']
            sub_flat = net_id['subnets'][0]

    body = {
        "router": {
            "name": "ext_router",
            'external_gateway_info': {
                'network_id': ext_flat,
                'enable_snat': True,
                'external_fixed_ips': [
                    {'subnet_id': sub_flat}
                ]
            },
        }
    }

    def create_router(self):
        re = requests.post(self.url + "routers", data=json.dumps(self.body), headers=self.headers).json()
        print(re)

    def main_router(self):
        re = requests.get(self.url + "routers", headers=self.headers).json()
        name = re["routers"]
        for router_id in name:
            if router_id['name'] == self.body['router']['name']:
                requests.delete(self.url + f"routers/{router_id['id']}", headers=self.headers)
                print("删除同名路由器...正在新建中...")
        else:
            self.create_router()
            print("路由新建成功...")


if __name__ == '__main__':
    r = router()
    r.main_router()

Upload an image

vim image_push.py 
#!/usr/bin/python3
import json, requests
from auth import get_token

headers = {'X-Auth-Token': get_token()}
url = "http://10.0.0.100:9292"

body = {
    "name": "cirros",
    'container_format': 'bare',
    'disk_format': 'qcow2',
}


def create_image():
    # Glance v2: first POST the image metadata as JSON...
    re = requests.post(url + "/v2/images", data=json.dumps(body),
                       headers={**headers, 'Content-Type': 'application/json'}).json()
    print(re)
    # ...then PUT the raw image file to the 'file' URL returned in the response
    re = requests.put(url + re["file"], data=open(r"D:\iso镜像\cirros-0.4.0-x86_64-disk.img", "rb"),
                      headers={**headers, 'Content-Type': 'application/octet-stream'}).status_code
    print(re)


def main_image():
    re = requests.get(url + "/v2/images", headers=headers).json()
    for image in re['images']:
        if image['name'] == body['name']:
            requests.delete(url + f"/v2/images/{image['id']}", headers=headers)
            print("Deleted image with the same name... recreating...")
    # the loop never breaks, so always (re)create afterwards
    create_image()
    print('Image uploaded successfully')


if __name__ == '__main__':
    main_image()
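
As a quick check under the same assumptions (Glance endpoint, get_token() helper): once the file PUT succeeds, the image status should have moved from queued to active. The script name check_image.py is arbitrary:

vim check_image.py
#!/usr/bin/python3
import requests
from auth import get_token

headers = {'X-Auth-Token': get_token()}
url = "http://10.0.0.100:9292"

for image in requests.get(url + "/v2/images", headers=headers).json()['images']:
    if image['name'] == 'cirros':
        # 'active' means both the metadata POST and the file upload completed
        print(image['name'], image['status'], image['size'])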

Create an instance

vim create_server.py 
#!/usr/bin/python3
import json, requests
import flavor
import image_push
from network_vxlan import networks_vxlan

from auth import get_token

headers = {}
headers['X-Auth-Token'] = get_token()
url = "http://10.0.0.100:8774/v2.1/servers"


# look up the IDs created by the earlier scripts (the image, flavor, and network must already exist)
image_ret = requests.get("http://10.0.0.100:9292/v2/images", headers=headers).json()["images"]
for image_json in image_ret:
    if image_json["name"] == image_push.body["name"]:
        image_id = image_json["id"]


flavor_ret = requests.get("http://10.0.0.100:8774/v2.1/flavors", headers=headers).json()["flavors"]
for flavor_json in flavor_ret:
    if flavor_json["name"] == flavor.body["flavor"]["name"]:
        # print(flavor_json["name"], flavor.body["flavor"]["name"])
        flavor_id = flavor_json['id']


network_ret = requests.get("http://10.0.0.100:9696/v2.0/networks/", headers=headers).json()["networks"]
for network_json in network_ret:
    if network_json["name"] == networks_vxlan.body["network"]["name"]:
        network_id = network_json["id"]


body = {
    "server": {
        "name": "vm01",
        "imageRef": image_id,
        "flavorRef": flavor_id,
        "networks": [{"uuid": network_id}]      # openstack network list
    }
}


def create_server():
    requests.post(url, data=json.dumps(body), headers=headers).json()


def main_server():
    re = requests.get(url, headers=headers).json()
    for server in re['servers']:
        if server['name'] == body['server']['name']:
            requests.delete(url + f"/{server['id']}", headers=headers)
            print("Deleted instance with the same name... recreating...")
    # the loop never breaks, so always (re)create afterwards
    create_server()
    print("Instance created successfully")


if __name__ == '__main__':
    main_server()
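
The POST to Nova returns as soon as the request is accepted, before the instance has finished building, so a polling step helps before binding the floating IP. A minimal sketch under the same assumptions as above (endpoints, get_token() helper); the script name wait_server.py and the wait_active helper are illustrative:

vim wait_server.py
#!/usr/bin/python3
import time, requests
from auth import get_token
import create_server

headers = {'X-Auth-Token': get_token()}
url = "http://10.0.0.100:8774/v2.1/servers"


def wait_active(name, timeout=300):
    # poll the server list until the named instance reports ACTIVE
    deadline = time.time() + timeout
    while time.time() < deadline:
        for srv in requests.get(url, headers=headers).json()['servers']:
            if srv['name'] == name:
                detail = requests.get(f"{url}/{srv['id']}", headers=headers).json()['server']
                if detail['status'] == 'ACTIVE':
                    return detail['id']
                if detail['status'] == 'ERROR':
                    raise RuntimeError(f"{name} went into ERROR state")
        time.sleep(5)
    raise TimeoutError(f"{name} did not become ACTIVE within {timeout}s")


if __name__ == '__main__':
    print(wait_active(create_server.body['server']['name']))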

Bind a floating IP

vim float_ip.py
#!/usr/bin/python3
import json, requests

from auth import get_token
import create_server
from network_flat import networks_flat

headers = {}
headers["X-Auth-Token"] = get_token()
url_float = 'http://10.0.0.100:9696/v2.0/floatingips'

url_server = "http://10.0.0.100:8774/v2.1/servers"

url_extnet = "http://10.0.0.100:9696/v2.0/networks"

server_name = requests.get(url_server, headers=headers).json()
for server_json in server_name["servers"]:
    if server_json["name"] == create_server.body["server"]["name"]:
        server_id = server_json["id"]

float_name = requests.get("http://10.0.0.100:9696/v2.0/ports", headers=headers).json()
for float_json in float_name["ports"]:
    if float_json["device_id"] == server_id:
        port_id = float_json["id"]

network_name = requests.get(url_extnet, headers=headers).json()
for extnet_json in network_name["networks"]:
    if extnet_json["name"] == networks_flat.body["network"]["name"]:
        extnet_id = extnet_json["id"]


def create_ip():
    body = {
        "floatingip": {
            "floating_network_id": extnet_id,  # external network ID (the instance itself uses the internal network, which must be attached to the router)
            "port_id": port_id,  # the instance's port ID (openstack port list)
        }
    }
    float_ret = requests.get(url_float, headers=headers).json()
    for float_data in float_ret["floatingips"]:
        if float_data["port_id"] == port_id:
            return f"Floating IP already bound: {float_data['floating_ip_address']}"
    else:
        requests.post(url_float, data=json.dumps(body), headers=headers).json()
        return "Floating IP bound successfully!"


if __name__ == '__main__':
    ret = create_ip()
    print(ret)

Create a security group

vim security.py
#!/usr/bin/python3
import json, requests

from auth import get_token

headers = {}
headers["X-Auth-Token"] = get_token()
url = "http://10.0.0.100:9696/v2.0/security-groups/"
body1 = {
    "security_group": {
        "name": "sec_group",
    }
}
sec_body = requests.get(url, headers=headers).json()
sec_ret = sec_body["security_groups"]


def security_group_create():
    for sec_json in sec_ret:
        if sec_json["name"] == body1["security_group"]["name"]:
            requests.delete(url + f"{sec_json['id']}", headers=headers)
            print("Deleted security group with the same name... recreating...")
    # the loop never breaks, so always (re)create afterwards
    res = requests.post(url, data=json.dumps(body1), headers=headers).json()
    print("Security group created successfully")
    return res


sec_rules_id = security_group_create()["security_group"]["id"]


def security_group_rules_create():
    url = "http://10.0.0.100:9696/v2.0/security-group-rules/"

    def port(max, min):
        body = {
            'security_group_rule': {
                'direction': 'ingress',           # allow inbound traffic to these ports (egress is open by default)
                'protocol': 'tcp',
                'ethertype': 'IPv4',
                'remote_ip_prefix': "0.0.0.0/0",  # any source address
                'port_range_max': max,
                'port_range_min': min,
                'security_group_id': sec_rules_id,
            }
        }
        return body

    rules = [port(20, 20), port(21, 21), port(22, 22), port(80, 80), port(3306, 3306)]
    result = []
    for body in rules:
        result.append(requests.post(url, data=json.dumps(body), headers=headers).json())


if __name__ == '__main__':
    security_group_rules_create()
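
As a quick verification under the same assumptions (Neutron endpoint, get_token() helper), the rules just created can be listed back by filtering on the group's ID; the script name check_rules.py is arbitrary:

vim check_rules.py
#!/usr/bin/python3
import requests
from auth import get_token

headers = {'X-Auth-Token': get_token()}
url = "http://10.0.0.100:9696/v2.0/"

# find the security group by name, then list the rules attached to it
groups = requests.get(url + "security-groups", headers=headers).json()['security_groups']
sec_id = [g['id'] for g in groups if g['name'] == 'sec_group'][0]

rules = requests.get(url + f"security-group-rules?security_group_id={sec_id}",
                     headers=headers).json()['security_group_rules']
for r in rules:
    print(r['direction'], r['protocol'], r['port_range_min'], r['port_range_max'])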

Reaching the VXLAN network over VPN with OpenVPN

Upload the image on the controller1 machine

openstack image create "centos7_5" --disk-format qcow2  --file CentOS_7.5_x86_64_XD.qcow2
  • OpenStack platform steps
    • Create an instance from the image on the vxlan network and bind the flat (external) network
  • Deploy OpenVPN

Install basic tools

rm -rf /etc/yum.repos.d/*

curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

yum install openvpn -y      # the openvpn server

yum install easy-rsa -y     # certificate management tool

rpm -ql easy-rsa

rpm -ql openvpn

Copy the configuration files

# openvpn server config file
cp /usr/share/doc/openvpn-2.4.12/sample/sample-config-files/server.conf /etc/openvpn/

# certificate management tool
cp -r /usr/share/easy-rsa/ /etc/openvpn/easyrsa-server

# variables file (the name must be vars)
cp /usr/share/doc/easy-rsa-3.0.8/vars.example /etc/openvpn/easyrsa-server/3/vars


cd /etc/openvpn/easyrsa-server/3/

tree
.
├── easyrsa
├── openssl-easyrsa.cnf
├── vars
└── x509-types
    ├── ca
    ├── client
    ├── code-signing
    ├── COMMON
    ├── server
    └── serverClient

Initialize the PKI environment and the CA

# check the current path
pwd
/etc/openvpn/easyrsa-server/3

# create the pki directory used to store certificates
./easyrsa  init-pki

Create the CA

  • Structure
    • key: the openvpn private key

    • crt: the openvpn public certificate

    • req (csr): the signing-request file used to issue a crt

    • ca: the self-signed certificate authority

# current path
pwd
/etc/openvpn/easyrsa-server/3

# extend the CA validity period (in days)
vim vars
set_var EASYRSA_CA_EXPIRE       7300

# create the CA without a password
./easyrsa build-ca nopass
For the description prompts, just press Enter

# verify the CA certificate
ll pki/ca.crt
-rw------- 1 root root 1172 Dec 25 18:57 pki/ca.crt

# verify the CA private key
ll pki/private/ca.key
-rw------- 1 root root 1679 Dec 25 18:57 pki/private/ca.key

Create the OpenVPN server certificate request (private key)

# current path
pwd
/etc/openvpn/easyrsa-server/3

# create the server certificate request without a password
./easyrsa gen-req server nopass
No description info is needed; just press Enter
When it finishes, check the prompt output to confirm the server private key and request file were created

Issue the OpenVPN server certificate

  • Use the self-built CA to issue the server certificate, i.e. generate the server crt; the crt will later be sent to clients so they can exchange encrypted traffic with the openvpn server
# current path
pwd
/etc/openvpn/easyrsa-server/3

# issue the server certificate; the request name is server
./easyrsa sign-req server server
Confirm the request: yes

# verify the generated server certificate
ll /etc/openvpn/easyrsa-server/3/pki/issued/server.crt

Create the Diffie-Hellman parameters

  • This key-exchange method was published in 1976 by Whitfield Diffie (Bailey Whitfield Diffie) and Martin Hellman (Martin Edward Hellman).

  • It is a security protocol that lets two parties establish a shared key over an insecure channel without any prior knowledge of each other; that key is then typically used as the key for symmetric encryption of subsequent traffic. The mathematics of DH rests on the discrete logarithm problem. Asymmetric algorithms such as RSA accomplish something similar.
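
To make the exchange concrete, here is a toy sketch in Python with deliberately tiny numbers; a real exchange uses the large prime that gen-dh produces below:

#!/usr/bin/python3
# toy Diffie-Hellman: both sides derive the same secret without ever transmitting it
p, g = 23, 5                          # public prime modulus and generator (toy values)
a, b = 6, 15                          # each side's private exponent, kept secret
A = pow(g, a, p)                      # one side publishes A = g^a mod p
B = pow(g, b, p)                      # the other publishes B = g^b mod p
assert pow(B, a, p) == pow(A, b, p)   # both compute g^(ab) mod p
print("shared secret:", pow(B, a, p))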

# current path
pwd
/etc/openvpn/easyrsa-server/3

./easyrsa gen-dh

Create the client certificate

  • Copy the client configuration files
cp -r /usr/share/easy-rsa/ /etc/openvpn/easyrsa-client/
cp /usr/share/doc/easy-rsa-3.0.8/vars.example /etc/openvpn/easyrsa-client/3/vars
  • Generate the pki directory
cd /etc/openvpn/easyrsa-client/3

./easyrsa init-pki
  • Generate the client certificate request
cd /etc/openvpn/easyrsa-client/3

# the certificate name is xier, with no password set
./easyrsa gen-req xier nopass

Issue the client certificate

  • Issue the client certificate from the openvpn server directory
cd /etc/openvpn/easyrsa-server/3/

# import the req file on the server side
./easyrsa import-req /etc/openvpn/easyrsa-client/3/pki/reqs/xier.req xier
  • Sign the client certificate on the server side
./easyrsa sign-req client xier
yes

# verify as shown in the prompt output
ll /etc/openvpn/easyrsa-server/3/pki/issued/xier.crt

Copy the server certificates into the certs directory for unified management

mkdir /etc/openvpn/certs

cd /etc/openvpn/certs

cp /etc/openvpn/easyrsa-server/3/pki/ca.crt .

cp /etc/openvpn/easyrsa-server/3/pki/issued/server.crt .

cp /etc/openvpn/easyrsa-server/3/pki/private/server.key .

cp /etc/openvpn/easyrsa-server/3/pki/dh.pem .

mv dh.pem dh2048.pem    # the name is arbitrary, but it must match the config file

Copy the client certificates into a dedicated directory

mkdir /etc/openvpn/client/xier

cd /etc/openvpn/client/xier

cp /etc/openvpn/easyrsa-server/3/pki/ca.crt .

cp /etc/openvpn/easyrsa-server/3/pki/issued/xier.crt .

cp /etc/openvpn/easyrsa-client/3/pki/private/xier.key .

Configure the server config file

vim /etc/openvpn/server.conf     # a complete copy-paste version follows below; this annotated walkthrough is for explanation
local 0.0.0.0

proto tcp
#proto udp

#dev tap   creates an Ethernet tunnel; use tap for Ethernet bridging
dev tun    # creates a routed IP tunnel; use tun for routed setups

#;dev-node MyTap    only needed on Windows, not on Linux

ca /etc/openvpn/certs/ca.crt

cert /etc/openvpn/certs/server.crt

key /etc/openvpn/certs/server.key

dh /etc/openvpn/certs/dh2048.pem

# topology subnet    network topology; no need to set it

server 10.8.0.0 255.255.0.0   # address pool for connected clients; the server takes the first IP, 10.8.0.1. The pool size caps the number of clients

#ifconfig-pool-persist ipp.txt   assign fixed IPs to clients; not needed

#;server-bridge 10.8.0.4 xxxxx    bridging setup; not needed

#;server-bridge

push "route 172.29.0.0 255.255.255.0"   # push routes for the backend networks; if the backend spans several subnets, add one line per subnet
push "route 172.28.0.0 255.255.248.0"   # these determine which backend servers clients can reach
push "route 10.20.0.10 255.255.255.0"

#;client-config-dir ccd     # add routes for specific clients; not needed

#;route xxxxx

#;learn-address ./script

#;push "redirect-gateway def1 bypass-dhcp"     when enabled, all client traffic is routed through the VPN server, so leave it off here; useful for full-tunnel setups

#;push "dhcp-optionxxxx DNS"

#;push "dhcp-optionxxxx DNS"

#;client-to-client     # allows different clients to talk to each other directly through the openvpn server; leave off

duplicate-cn     # lets multiple users share one certificate; fine for test environments, but production should use one certificate per user

keepalive 10 120     # liveness checks: ping every 10 seconds, declare the peer down after 120 seconds without a reply

tls-auth /etc/openvpn/certs/ta.key 0    # generate with: openvpn --genkey --secret /etc/openvpn/certs/ta.key. The server and every client need this key: cp /etc/openvpn/certs/ta.key /etc/openvpn/client/xier/. The second argument is "0" on the server and "1" on clients

cipher AES-256-CBC     # encryption algorithm

#;compress lz4-v2     # enable compression
#;push "compress lz4-v2"   # push the compression algorithm to clients
#;comp-lzo      # compression setting compatible with legacy clients; requires compression enabled on the client side

max-clients 4096     # maximum number of clients

user openvpn
group openvpn

#persist-key    re-read the key files on service restart instead of keeping the first ones loaded; leave off
#persist-tun    keep the tun/tap device up across service restarts instead of cycling it down and up; leave off

status /var/log/openvpn/openvpn-status.log    # openvpn status file, written once a minute; requires mkdir /var/log/openvpn

#;log openvpn.log      log mode and path; with "log" the file is truncated every time openvpn starts

verb 3     # log level, 0-9; higher levels log more detail

mute 20    # only the first 20 messages of the same category are written to the log

#explicit-exit-notify 1    # tells clients to reconnect automatically after a server restart; udp mode only. tcp mode reconnects on its own without this, and setting it with tcp prevents openvpn from starting





================== Copy-paste-ready config ==================
local 0.0.0.0
port 1194
proto tcp
dev tun
ca /etc/openvpn/certs/ca.crt
cert /etc/openvpn/certs/server.crt
key /etc/openvpn/certs/server.key
dh /etc/openvpn/certs/dh2048.pem
tls-auth /etc/openvpn/certs/ta.key 0
server 10.8.0.0 255.255.255.0
push "route 166.66.66.0 255.255.255.0"
push "route 188.88.88.0 255.255.255.0"
keepalive 10 120
cipher AES-256-CBC
max-clients 4096
user openvpn
group openvpn
status /var/log/openvpn/openvpn-status.log
verb 3
mute 20

Flush the firewall rules

  • Cloud instances usually ship without firewall rules; if any of these commands error out, just ignore it
systemctl stop firewalld

systemctl disable firewalld

yum install iptables-services iptables -y

systemctl enable --now iptables

# flush the rules
iptables -F
iptables -X
iptables -Z
iptables -t nat -F
iptables -t nat -X
iptables -t nat -Z

Enable IP forwarding

vim /etc/sysctl.conf
net.ipv4.ip_forward = 1

sysctl -p

Create iptables rules

iptables -t nat -A POSTROUTING -s 10.8.0.0/16 -j MASQUERADE

iptables -A INPUT -p TCP --dport 1194 -j ACCEPT

iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT

iptables-save

# verify the firewall rules
iptables -vnL

Create the log directory and set ownership

mkdir /var/log/openvpn

chown openvpn:openvpn /var/log/openvpn

Start the openvpn service

systemctl enable --now openvpn@server

Install the OpenVPN client on a Windows PC

  • Client download links:
    1. https://openvpn.net/community-downloads/
    2. https://swupdate.openvpn.org/community/releases/OpenVPN-2.5.4-I604-amd64.msi
  • On Windows, just run the installer

Client configuration file

cd /etc/openvpn/client/xier

grep -Ev "^(#|$|;)" /usr/share/doc/openvpn-2.4.12/sample/sample-config-files/client.conf > /etc/openvpn/client/xier/client.ovpn

cp /etc/openvpn/certs/ta.key .

vim /etc/openvpn/client/xier/client.ovpn
client    # declare this side as the client
dev tun   # interface type; must match the server
proto tcp # protocol; must match the server
remote <floating-IP> 1194  # the server's IP and port; a domain name also works but must resolve to the IP
resolv-retry infinite  # if a domain name is used, keep re-resolving it so a changed record reconnects to the new IP
nobind    # do not bind a local listening port; the client connects from a random source port to the server's 1194
#persist-key
#persist-tun
ca ca.crt
cert xier.crt
key xier.key
remote-cert-tls server   # verify that the peer certificate is a server certificate
tls-auth ta.key 1
cipher AES-256-CBC
verb 3

Package the certificates and config file

cd /etc/openvpn/client/xier

tar zcvf xier.tar.gz ./*

# export the xier.tar.gz archive to Windows
yum install -y lrzsz

sz -y xier.tar.gz
  • Import the certificate bundle into Windows:
    • locate the config directory under the Windows OpenVPN installation path

    • extract the exported archive into it

  • Connect to the VPN from the Windows client and test access to the VXLAN network