2 基础服务:
2.1 mariadb部署 - All controller
#每台controller安装,这里以controller003为例
#安装mariadb源并安装
apt-get install software-properties-common
apt-key adv --fetch-keys 'https://mariadb.org/mariadb_release_signing_key.asc'
add-apt-repository 'deb [arch=amd64,arm64,ppc64el] https://mirrors.ustc.edu.cn/mariadb/repo/10.5/ubuntu focal main'
apt update
apt install mariadb-server python3-pymysql galera-4
#启动数据库服务,并将其配置开机启动:
systemctl enable mariadb.service
#通过运行mysql_secure_installation脚本来重设密码,我这边设置的是devops:
mysql_secure_installation
#编辑配置文件,绑定地址设置为控制节点的管理IP地址,以允许其他节点通过管理网络进行访问:
#vim /etc/mysql/mariadb.conf.d/50-server.cnf
[mysqld]
bind-address = 192.168.1.3
max_connections = 4096
connect_timeout = 5
interactive_timeout = 3600
wait_timeout = 3600
max_allowed_packet = 32M
thread_cache_size = 128
sort_buffer_size = 4M
bulk_insert_buffer_size = 16M
tmp_table_size = 32M
max_heap_table_size = 32M
myisam_recover_options = BACKUP
key_buffer_size = 128M
table_open_cache = 400
myisam_sort_buffer_size = 512M
concurrent_insert = 2
read_buffer_size = 2M
read_rnd_buffer_size = 1M
query_cache_limit = 128K
query_cache_size = 64M
log_warnings = 2
expire_logs_days = 10
max_binlog_size = 100M
default_storage_engine = InnoDB
innodb_buffer_pool_size = 256M
innodb_log_buffer_size = 8M
innodb_file_per_table = 1
innodb_open_files = 400
innodb_io_capacity = 400
innodb_flush_method = O_DIRECT
collation_server = utf8_general_ci
character_set_server = utf8
[galera]
wsrep_on=ON
wsrep_provider=/usr/lib/galera/libgalera_smm.so
wsrep_cluster_address="gcomm://192.168.1.3,192.168.1.4,192.168.1.5"
binlog_format=row
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2
bind-address=192.168.1.3
# any cluster name
wsrep_cluster_name="Openstack_Cluster"
# own IP address
wsrep_node_name=controller003
wsrep_node_address="192.168.1.3"
innodb_doublewrite=1
#停止全部控制节点的mariadb服务,以controller003节点为例
systemctl stop mariadb.service
#任选1个控制节点以如下方式启动mariadb服务,这里选择controller003节点
[root@controller003 ~]# galera_new_cluster
#其他控制节点加入mariadb集群,以controller004节点为例;
#启动后加入集群,controller004/controller005节点从controller003节点同步数据,也可同步查看mariadb日志(Ubuntu下一般为/var/log/mysql/error.log,或用journalctl -u mariadb查看)
[root@controller004 ~]# systemctl start mariadb.service
[root@controller005 ~]# systemctl start mariadb.service
#重新启动controller003节点;
#启动前删除controller003节点的数据(重启后将通过SST从集群其他节点全量重新同步)
[root@controller003 ~]# rm -rf /var/lib/mysql/*
[root@controller003 ~]# systemctl restart mariadb.service
#查看集群状态
[root@controller003 ~]# mysql -uroot -p
Enter password:
MariaDB [(none)]> show status like "wsrep_cluster_size";
+--------------------+-------+
| Variable_name | Value |
+--------------------+-------+
| wsrep_cluster_size | 3 |
+--------------------+-------+
1 row in set (0.001 sec)
MariaDB [(none)]> SHOW status LIKE 'wsrep_ready';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| wsrep_ready | ON |
+---------------+-------+
1 row in set (0.001 sec)
2.2 RabbitMQ部署 - All controller
#OpenStack使用消息队列来协调服务之间的操作和状态信息。消息队列服务通常在控制节点上运行。OpenStack支持多个消息队列服务,包括RabbitMQ、Qpid和ZeroMQ。如果您希望实现不同的消息队列服务,请参考与之相关的文档。
2.2.1 添加rabbitmq-erlang源
#vim /etc/apt/sources.list.d/rabbitmq.list
#注意:apt源文件不支持yum风格的[rabbitmq-erlang]小节标题,直接写deb行即可
deb https://packages.erlang-solutions.com/ubuntu focal contrib
#导入erlang-solutions仓库的GPG公钥,否则apt update会报NO_PUBKEY错误
wget -qO- https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc | apt-key add -
#更新源
apt update
#安装rabbitmq
apt install rabbitmq-server -y
#以controller003为例,启动rabbitmq,并将其配置开机启动:
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
systemctl status rabbitmq-server.service
2.2.2 构建rabbitmq集群
#新建节点标识文件
tee /etc/rabbitmq/rabbitmq-env.conf << EOF
RABBITMQ_NODENAME=rabbit@controller003
RABBITMQ_NODE_IP_ADDRESS=192.168.1.3
RABBITMQ_NODE_PORT=5672
EOF
#任选1个控制节点首先停止所有节点rabbitmq服务,这里选择controller003节点
[root@controller003 ~]#systemctl stop rabbitmq-server.service
#分发.erlang.cookie
[root@controller003 ~]#scp /var/lib/rabbitmq/.erlang.cookie root@192.168.1.4:/var/lib/rabbitmq/
[root@controller003 ~]#scp /var/lib/rabbitmq/.erlang.cookie root@192.168.1.5:/var/lib/rabbitmq/
#修改controller004/005节点.erlang.cookie文件的用户/组及权限,以controller004节点为例
#(scp以root分发后文件属主为root,rabbitmq用户无法读取,必须先chown)
[root@controller004 ~]#chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie
[root@controller004 ~]#chmod 400 /var/lib/rabbitmq/.erlang.cookie
#注意修改全部控制节点.erlang.cookie文件的权限,默认即400权限,可不修改
[root@controller004 ~]#ll /var/lib/rabbitmq/.erlang.cookie
#启动所有节点的rabbitmq服务
[root@controller003 ~]#systemctl start rabbitmq-server
[root@controller004 ~]#systemctl start rabbitmq-server
[root@controller005 ~]#systemctl start rabbitmq-server
#构建集群,controller004/005节点以ram节点的形式加入集群
[root@controller004 ~]#rabbitmqctl stop_app
[root@controller004 ~]#rabbitmqctl join_cluster --ram rabbit@controller003
[root@controller004 ~]#rabbitmqctl start_app
#检查是否创建成功
[root@controller003 ~]# rabbitmqctl cluster_status
Cluster status of node rabbit@controller003 ...
Basics
Cluster name: rabbit@controller003
Disk Nodes
rabbit@controller003
RAM Nodes
rabbit@controller004
rabbit@controller005
Running Nodes
rabbit@controller003
rabbit@controller004
rabbit@controller005
Versions
rabbit@controller003: RabbitMQ 3.8.2 on Erlang 22.2.7
rabbit@controller004: RabbitMQ 3.8.2 on Erlang 22.2.7
rabbit@controller005: RabbitMQ 3.8.2 on Erlang 22.2.7
#添加rabbitmq用户,并设置密码为rabbitmq.123:
rabbitmqctl add_user rabbitmq rabbitmq.123
rabbitmqctl set_user_tags rabbitmq administrator
#允许rabbitmq用户的配置、写和读访问:
rabbitmqctl set_permissions -p "/" rabbitmq ".*" ".*" ".*"
#查看账号
rabbitmqctl list_users
#镜像队列ha
# 设置镜像队列高可用
rabbitmqctl set_policy ha-all "^" '{"ha-mode":"all"}'
# 查看镜像队列策略
rabbitmqctl list_policies
#安装web管理插件
# 在全部控制节点安装web管理插件,以controller003节点为例
rabbitmq-plugins enable rabbitmq_management
访问任意节点,如:http://192.168.1.3:15672
2.3 Memcached部署 - All controller
#身份服务验证机制使用Memcached来缓存令牌。memcached服务通常在控制节点上运行。
apt install memcached python3-memcache -y
#将服务配置为控制节点的管理IP地址。这是为了允许其他节点通过管理网络访问:
#vim /etc/memcached.conf
-m 4096 #配置内存
-l 192.168.1.3 #设置监听地址,一般为本机非公网IP
#启动Memcached服务,并配置为开机启动:
systemctl enable memcached.service
systemctl start memcached.service
systemctl status memcached.service
2.4 Etcd部署 - All controller
#OpenStack服务可以使用Etcd,这是一种分布式可靠的键值存储,用于分布式密钥锁定、存储配置、跟踪服务生存周期和其他场景
apt install etcd -y
#修改配置文件为控制节点的管理IP地址,使其他节点能够通过管理网络进行访问:
#vim /etc/default/etcd
ETCD_NAME="controller003"
ETCD_DATA_DIR="/var/lib/etcd"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER="controller003=http://192.168.1.3:2380,controller004=http://192.168.1.4:2380,controller005=http://192.168.1.5:2380"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.1.3:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.1.3:2379"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.1.3:2379,http://127.0.0.1:2379"
#修改 etcd.service
#vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=etcd - highly-available key value store
Documentation=https://github.com/coreos/etcd
Documentation=man:etcd
After=network.target
Wants=network-online.target
[Service]
Environment=DAEMON_ARGS=
Environment=ETCD_NAME=%H
Environment=ETCD_DATA_DIR=/var/lib/etcd/default
EnvironmentFile=-/etc/default/%p
Type=notify
User=etcd
PermissionsStartOnly=true
#ExecStart=/bin/sh -c "GOMAXPROCS=$(nproc) /usr/bin/etcd $DAEMON_ARGS"
#ExecStart=/usr/bin/etcd $DAEMON_ARGS
#Restart=on-abnormal
#RestartSec=10s
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd \
--name=\"${ETCD_NAME}\" \
--data-dir=\"${ETCD_DATA_DIR}\" \
--listen-peer-urls=\"${ETCD_LISTEN_PEER_URLS}\" \
--listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" \
--initial-advertise-peer-urls=\"${ETCD_INITIAL_ADVERTISE_PEER_URLS}\" \
--advertise-client-urls=\"${ETCD_ADVERTISE_CLIENT_URLS}\" \
--initial-cluster=\"${ETCD_INITIAL_CLUSTER}\" \
--initial-cluster-token=\"${ETCD_INITIAL_CLUSTER_TOKEN}\" \
--initial-cluster-state=\"${ETCD_INITIAL_CLUSTER_STATE}\""
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd2.service
#依次启动每个节点的etcd服务,并配置为开机启动:
systemctl enable etcd
systemctl start etcd
systemctl status etcd
#验证etcd
#注意:cluster-health为etcd v2 API命令;若etcdctl默认使用v3 API,需先export ETCDCTL_API=2(v3下对应命令为etcdctl endpoint health)
[root@controller003 ~]# etcdctl cluster-health
member c8750a07a2008c34 is healthy: got healthy result from http://192.168.1.4:2379
member ddc8bcfe3f80cd5d is healthy: got healthy result from http://192.168.1.3:2379
member fc456eb6d491b837 is healthy: got healthy result from http://192.168.1.5:2379
cluster is healthy
[root@controller003 ~]# etcdctl member list
c8750a07a2008c34: name=controller004 peerURLs=http://192.168.1.4:2380 clientURLs=http://192.168.1.4:2379 isLeader=false
ddc8bcfe3f80cd5d: name=controller003 peerURLs=http://192.168.1.3:2380 clientURLs=http://192.168.1.3:2379 isLeader=true
fc456eb6d491b837: name=controller005 peerURLs=http://192.168.1.5:2380 clientURLs=http://192.168.1.5:2379 isLeader=false
至此,基础服务已部署完毕,如有问题请联系我改正,感激不尽!
X.过程中遇到的问题
eg.1.
解决方案: