Three-node, single-controller OpenStack Pike: official-guide installation plus Ceph integration

#### Installation following the official guide
### Host information

13.13.59.10 controller
13.13.59.18 compute1
13.13.59.24 ceph24

### All nodes use bonded NICs: management network on bond0, provider network on bond1
### Networking uses the self-service layout

###### Environment preparation
### Set up SSH keys on the controller node

ssh-keygen
 ssh-copy-id -i .ssh/id_rsa.pub root@13.13.59.18
 ssh-copy-id -i .ssh/id_rsa.pub root@13.13.59.24

### Configure /etc/hosts
 

for i in 10 18 24;do ssh 13.13.59.$i  "echo '
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
13.13.59.10 controller
13.13.59.18 compute1
13.13.59.24 ceph24' > /etc/hosts";done

### Configure SELinux
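## A minimal sketch, assuming SELinux is simply switched to permissive on all three nodes (the openstack-selinux package installed later handles the remaining policy):

for i in 10 18 24;do ssh 13.13.59.$i "setenforce 0; sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config";done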

### Configure firewalld
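## A minimal sketch, assuming firewalld is simply stopped and disabled on all three nodes; in a lab this is the quickest route, otherwise open each service port individually:

for i in 10 18 24;do ssh 13.13.59.$i "systemctl stop firewalld; systemctl disable firewalld";done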

### Set hostnames

hostnamectl set-hostname controller
ssh 13.13.59.18 "hostnamectl set-hostname compute1"
ssh 13.13.59.24 "hostnamectl set-hostname ceph24"

####### Configure the NTP service

for i in 10 18 24;do ssh 13.13.59.$i  "chronyc sources ";done
for i in 10 18 24;do ssh 13.13.59.$i "date ";done

### Configure the OpenStack package sources
 

for i in 10 18 24;do ssh 13.13.59.$i  "yum install centos-release-openstack-pike epel-release -y ";done
for i in 10 18 24;do ssh 13.13.59.$i " yum upgrade -y && reboot   ";done
for i in 10 18 24;do ssh 13.13.59.$i  " yum install python-openstackclient openstack-selinux  -y";done

######################################## Operations on the controller node #############################
### Install the database
 

yum install mariadb mariadb-server python2-PyMySQL -y

## Create openstack.cnf

echo '
[mysqld]
bind-address = 13.13.59.10

default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
' > /etc/my.cnf.d/openstack.cnf

### Start the database

systemctl enable mariadb.service
systemctl start mariadb.service
systemctl status mariadb.service

## Initialize the database

[[ -f /usr/bin/expect ]] || { yum install expect -y; } # install expect if it is missing
/usr/bin/expect << EOF
set timeout 30
spawn mysql_secure_installation
expect {
    "enter for none" { send "\r"; exp_continue}
    "Y/n" { send "Y\r" ; exp_continue}
    "password:" { send "Test@2018\r"; exp_continue}
    "new password:" { send "Test@2018\r"; exp_continue}
    "Y/n" { send "Y\r" ; exp_continue}
    eof { exit }
}
EOF

### Check whether the settings took effect
 

mysql -uroot -pTest@2018 -e "show variables like 'max_connections';"
+-----------------+-------+
| Variable_name   | Value |
+-----------------+-------+
| max_connections | 214   |
+-----------------+-------+

1 row in set (0.00 sec)
## max_connections stays at 214 because mariadb inherits systemd's default open-file limit; to make the setting take effect, raise the limit in /usr/lib/systemd/system/mariadb.service.
 

##############################################################################
## sed tip: insert a line before a match
## sed -i '/allow 361way.com/iallow www.361way.com' the.conf.file
## insert a line after a match
## sed -i '/allow 361way.com/aallow www.361way.com' the.conf.file
##############################################################################

### Insert two limit parameters right after [Service]

sed -i '/\[Service\]/a\LimitNOFILE=10000\nLimitNPROC=10000' /usr/lib/systemd/system/mariadb.service

## Restart the service

systemctl daemon-reload
 systemctl restart mariadb
mysql -uroot -pTest@2018 -e "show variables like 'max_connections';"
+-----------------+-------+
| Variable_name   | Value |
+-----------------+-------+
| max_connections | 4096  |
+-----------------+-------+

############ Install rabbitmq-server
 

yum install rabbitmq-server -y
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
systemctl status rabbitmq-server

### Create the user and set its password
 

rabbitmqctl add_user openstack RABBIT_PASS
##Creating user "openstack" ...
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
###Setting permissions for user "openstack" in vhost "/" ...

### Confirm the user was created

rabbitmqctl list_users 

# The output below indicates success; if the user is missing, nova's openstack-nova-consoleauth.service and openstack-nova-scheduler.service will fail later.
 

#Listing users ...
#openstack    []
#guest    [administrator]

######## Install memcached ############################
yum install memcached python-memcached -y
## Edit the memcached config so it also listens on the controller address
 

sed -i 's/\(OPTIONS="-l 127.0.0.1,::1\)/&,controller/'  /etc/sysconfig/memcached
systemctl enable memcached.service
systemctl start memcached.service
systemctl status memcached
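## A quick check that memcached is now listening on the controller address as well as loopback:

ss -tnlp | grep 11211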

##### Tip: inserting a string before/after a match with sed
## sed -i 's/MATCH/INSERT&/'  file   # insert before the match
## sed -i 's/MATCH/&INSERT/'  file   # insert after the match
##############################################

############################################################################
### Install the Keystone component
############################################################################

# Set up the database

mysql -u root -pTest@2018 -e "CREATE DATABASE keystone; \
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS'; \
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';"
#check setting
 mysql -u keystone -pKEYSTONE_DBPASS -e "show databases;" 

## Install the keystone packages
 

 yum install openstack-keystone httpd mod_wsgi openstack-utils -y 
 cp /etc/keystone/keystone.conf{,.bk}

### If the openstack-config command is missing, install openstack-utils to get it

 openstack-config --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
 openstack-config --set /etc/keystone/keystone.conf token provider  fernet

### Verify the settings

 egrep -v '#|^$' /etc/keystone/keystone.conf

### Sync the database

su -s /bin/sh -c "keystone-manage db_sync" keystone

## Initialize keystone

keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
 
keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
--bootstrap-admin-url http://controller:35357/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne

## Configure httpd

echo '
ServerName controller
' >> /etc/httpd/conf/httpd.conf

ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

 systemctl enable httpd.service
 systemctl start httpd.service

## Set the environment variables

echo '
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://13.13.59.10:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
' > .admin_openrc.sh

### Configure keystone projects, users, and roles
 

openstack project create --domain default --description "Service Project" service
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password=DEMO_PASS demo
openstack role create user
openstack role add --project demo --user demo user

### Verify

unset OS_AUTH_URL OS_PASSWORD
openstack --os-auth-url http://controller:35357/v3 \
--os-project-domain-name Default --os-user-domain-name Default \
--os-project-name admin --os-username admin token issue
openstack --os-auth-url http://controller:5000/v3 \
--os-project-domain-name Default --os-user-domain-name Default \
--os-project-name demo --os-username demo token issue

### Create the demo environment file

echo '
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=DEMO_PASS
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
' > .demo_openrc.sh

## Verify token issuance

source .admin_openrc.sh
openstack token issue

####################################################################
##### Install the Glance component
############################################
## Set up the database
 

mysql -u root -pTest@2018 -e "CREATE DATABASE glance; \
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS'; \
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';"
#check setting
 mysql -u glance -pGLANCE_DBPASS -e "show databases;"

### Set up authentication
 

source .admin_openrc.sh
 openstack user create --domain default --password=GLANCE_PASS glance
 openstack role add --project service --user glance admin
 openstack service create --name glance --description "OpenStack Image" image
 openstack endpoint create --region RegionOne image public http://controller:9292
 openstack endpoint create --region RegionOne image internal http://controller:9292
 openstack endpoint create --region RegionOne image admin http://controller:9292

## Install the glance package

 yum install openstack-glance -y

## Configure glance-api.conf

cp /etc/glance/glance-api.conf{,.bk}

#[database]
openstack-config --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
#[keystone_authtoken]
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_uri  http://controller:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_url  http://controller:35357
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken memcached_servers  controller:11211
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_type  password
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_domain_name  default
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken user_domain_name  default
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_name  service
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken username  glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken password  GLANCE_PASS
#[paste_deploy]
openstack-config --set /etc/glance/glance-api.conf paste_deploy flavor  keystone
#[glance_store]
openstack-config --set /etc/glance/glance-api.conf glance_store stores  file,http
openstack-config --set /etc/glance/glance-api.conf glance_store default_store  file
openstack-config --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir  /var/lib/glance/images/

## Verify the configuration

egrep  -v '#|^$' /etc/glance/glance-api.conf

## Configure glance-registry.conf
 

cp /etc/glance/glance-registry.conf{,.bk}

####[keystone_authtoken]
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_uri  http://controller:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_url  http://controller:35357
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken memcached_servers  controller:11211
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_type  password
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_domain_name  default
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken user_domain_name  default
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_name  service
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken username  glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken password  GLANCE_PASS
#[paste_deploy]
openstack-config --set /etc/glance/glance-registry.conf paste_deploy flavor  keystone

## Verify the configuration

egrep  -v '#|^$' /etc/glance/glance-registry.conf

### Sync the database

su -s /bin/sh -c "glance-manage db_sync" glance

### Start the services

systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service
systemctl status openstack-glance-api.service openstack-glance-registry.service

### Verify the installation

yum install wget -y

wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
source .admin_openrc.sh
openstack image create "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public

openstack image list
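## Optionally confirm the image file landed in the filesystem store configured above (filesystem_store_datadir):

ls -lh /var/lib/glance/images/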

####################################################################
##### Install the Nova component
#####################################################################

## Set up the databases
 

mysql -u root -pTest@2018 -e "
CREATE DATABASE nova_api; \
CREATE DATABASE nova; \
CREATE DATABASE nova_cell0; \
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS'; \
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS'; \
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS'; \
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS'; \
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS'; \
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS'; \
"

#check setting

 mysql -u nova -pNOVA_DBPASS -e "show databases;"

## Set up authentication
 

openstack user create --domain default --password=NOVA_PASS nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
openstack user create --domain default --password=PLACEMENT_PASS placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778

### Install the nova packages
 

yum install -y openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api

### Configure nova.conf
 

 cp /etc/nova/nova.conf{,.bk}
###[DEFAULT]
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis  osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url  rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip  13.13.59.10
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy  keystone
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron  True
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver  nova.virt.firewall.NoopFirewallDriver
#####[api_database]
openstack-config --set /etc/nova/nova.conf api_database connection  mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
######[database]
openstack-config --set /etc/nova/nova.conf database connection  mysql+pymysql://nova:NOVA_DBPASS@controller/nova
####[keystone_authtoken]
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri  http://controller:5000
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url  http://controller:35357
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers  controller:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type  password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name  default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name  default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name  service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username  nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password  NOVA_PASS
#####[vnc]
openstack-config --set /etc/nova/nova.conf vnc enabled  true
openstack-config --set /etc/nova/nova.conf vnc vncserver_listen  '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address  '$my_ip'
#####[glance]
openstack-config --set /etc/nova/nova.conf glance api_servers  http://controller:9292
####[oslo_concurrency]
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path  /var/lib/nova/tmp
####[placement]
openstack-config --set /etc/nova/nova.conf placement os_region_name  RegionOne
openstack-config --set /etc/nova/nova.conf placement project_domain_name  Default
openstack-config --set /etc/nova/nova.conf placement project_name  service
openstack-config --set /etc/nova/nova.conf placement auth_type  password
openstack-config --set /etc/nova/nova.conf placement user_domain_name  Default
openstack-config --set /etc/nova/nova.conf placement auth_url  http://controller:35357/v3
openstack-config --set /etc/nova/nova.conf placement username  placement
openstack-config --set /etc/nova/nova.conf placement password  PLACEMENT_PASS
####[scheduler]
openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval  300
egrep -v '#|^$' /etc/nova/nova.conf

### Configure httpd for the placement API

echo '
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
' >> /etc/httpd/conf.d/00-nova-placement-api.conf

systemctl restart httpd
systemctl status httpd

### Sync the databases

su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova

### Verify

nova-manage cell_v2 list_cells

### Start the services

systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl status openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
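## A quick sanity check: nova-consoleauth, nova-scheduler and nova-conductor should all report state "up":

source .admin_openrc.sh
openstack compute service list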

#################################################################################
##### Install nova on the compute node #####
#################################################################################
 

ping compute1 -c 2

ssh compute1 "yum install openstack-nova-compute -y"

ssh compute1 "egrep -v '#|^$' /etc/nova/nova.conf"
ssh compute1 "cp /etc/nova/nova.conf{,.bk}"

##### Configure nova.conf
 

ssh compute1 "
###[DEFAULT]
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis  osapi_compute,metadata;
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url  rabbit://openstack:RABBIT_PASS@controller;
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip  13.13.59.18;
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron  True;
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver  nova.virt.firewall.NoopFirewallDriver;

###[api]
openstack-config --set /etc/nova/nova.conf api auth_strategy  keystone;
####[keystone_authtoken]
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri  http://controller:5000;
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url  http://controller:35357;
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers  controller:11211;
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type  password;
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name  default;
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name  default;
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name  service;
openstack-config --set /etc/nova/nova.conf keystone_authtoken username  nova;
openstack-config --set /etc/nova/nova.conf keystone_authtoken password  NOVA_PASS;
#####[vnc]
openstack-config --set /etc/nova/nova.conf vnc enabled  true;
openstack-config --set /etc/nova/nova.conf vnc vncserver_listen  0.0.0.0;
openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address  '\$my_ip';
openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html;
#####[glance]
openstack-config --set /etc/nova/nova.conf glance api_servers  http://controller:9292;
####[oslo_concurrency]
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path  /var/lib/nova/tmp;
####[placement]
openstack-config --set /etc/nova/nova.conf placement os_region_name  RegionOne;
openstack-config --set /etc/nova/nova.conf placement project_domain_name  Default;
openstack-config --set /etc/nova/nova.conf placement project_name  service;
openstack-config --set /etc/nova/nova.conf placement auth_type  password;
openstack-config --set /etc/nova/nova.conf placement user_domain_name  Default;
openstack-config --set /etc/nova/nova.conf placement auth_url  http://controller:35357/v3;
openstack-config --set /etc/nova/nova.conf placement username  placement;
openstack-config --set /etc/nova/nova.conf placement password  PLACEMENT_PASS;
openstack-config --set /etc/nova/nova.conf libvirt virt_type  kvm;
egrep -v '#|^$' /etc/nova/nova.conf "

##### Start the services
 

ssh compute1 "
systemctl enable libvirtd.service openstack-nova-compute.service;
systemctl start libvirtd.service openstack-nova-compute.service;
sleep 5;
systemctl status libvirtd.service openstack-nova-compute.service
"

### Verify the installation from the controller node

source .admin_openrc.sh
openstack compute service list --service nova-compute
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

 ########################################################################################
############## Install Neutron ####################

## Set up the database
 

mysql -u root -pTest@2018 -e "
CREATE DATABASE neutron; \
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'NEUTRON_DBPASS'; \
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'NEUTRON_DBPASS';"
#check setting
 mysql -u neutron -pNEUTRON_DBPASS -e "show databases;"

### Set up authentication
 

openstack user create --domain default --password=NEUTRON_PASS neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696

### Networking: self-service networks (option 2)
## Install the neutron packages

yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y

## Configure neutron.conf

cp /etc/neutron/neutron.conf{,.bk}
#[database]
openstack-config --set /etc/neutron/neutron.conf database connection  mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
#[DEFAULT]
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin  ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins  router
openstack-config --set /etc/neutron/neutron.conf DEFAULT allow_overlapping_ips  true
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url  rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy  keystone
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes  true
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes  true
##[keystone_authtoken]
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri  http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url  http://controller:35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers  controller:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type  password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name  default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name  default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name  service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username  neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password  NEUTRON_PASS
##[nova]
openstack-config --set /etc/neutron/neutron.conf nova auth_url  http://controller:35357
openstack-config --set /etc/neutron/neutron.conf nova auth_type  password
openstack-config --set /etc/neutron/neutron.conf nova project_domain_name  default
openstack-config --set /etc/neutron/neutron.conf nova user_domain_name  default
openstack-config --set /etc/neutron/neutron.conf nova region_name  RegionOne
openstack-config --set /etc/neutron/neutron.conf nova project_name  service
openstack-config --set /etc/neutron/neutron.conf nova username  nova
openstack-config --set /etc/neutron/neutron.conf nova password  NOVA_PASS
##[oslo_concurrency]
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path  /var/lib/neutron/tmp
egrep -v '#|^$' /etc/neutron/neutron.conf

#### Configure /etc/neutron/plugins/ml2/ml2_conf.ini

cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bk}
#####[ml2]
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers  flat,vlan,vxlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types  vxlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers  linuxbridge,l2population
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers  port_security
##[ml2_type_flat]
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks  provider
##[ml2_type_vxlan]
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges  1:1000
##[securitygroup]
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset  true
egrep -v '#|^$' /etc/neutron/plugins/ml2/ml2_conf.ini

### Configure /etc/neutron/plugins/ml2/linuxbridge_agent.ini
 

cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bk}
##[linux_bridge]
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings  provider:bond1
##[vxlan]
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan  true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip  13.13.59.10
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population  true
##[securitygroup]
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group  true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver  neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
egrep -v '#|^$' /etc/neutron/plugins/ml2/linuxbridge_agent.ini

#### Configure the layer-3 agent: /etc/neutron/l3_agent.ini

cp /etc/neutron/l3_agent.ini{,.bk}
##[DEFAULT]
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver  linuxbridge
egrep -v '#|^$'  /etc/neutron/l3_agent.ini

#### Configure the DHCP agent: /etc/neutron/dhcp_agent.ini

cp /etc/neutron/dhcp_agent.ini{,.bk}
##[DEFAULT]
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver  linuxbridge
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver  neutron.agent.linux.dhcp.Dnsmasq
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata  true
egrep -v '#|^$'  /etc/neutron/dhcp_agent.ini

### Configure the metadata agent: /etc/neutron/metadata_agent.ini
 

cp /etc/neutron/metadata_agent.ini{,.bk}
###[DEFAULT]
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_host  controller
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret  METADATA_SECRET
egrep -v '#|^$' /etc/neutron/metadata_agent.ini

### Add the neutron settings to the nova service

cp /etc/nova/nova.conf{,.add_neutron_before}
####[neutron]
openstack-config --set /etc/nova/nova.conf neutron url  http://controller:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url  http://controller:35357
openstack-config --set /etc/nova/nova.conf neutron auth_type  password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name  default
openstack-config --set /etc/nova/nova.conf neutron user_domain_name  default
openstack-config --set /etc/nova/nova.conf neutron region_name  RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name  service
openstack-config --set /etc/nova/nova.conf neutron username  neutron
openstack-config --set /etc/nova/nova.conf neutron password  NEUTRON_PASS
openstack-config --set /etc/nova/nova.conf neutron service_metadata_proxy  true
openstack-config --set /etc/nova/nova.conf neutron metadata_proxy_shared_secret  METADATA_SECRET
egrep -v '#|^$' /etc/nova/nova.conf

### The neutron configuration is complete; next, sync the database

##### Sync the database

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

## Start the services
 

systemctl restart openstack-nova-api.service

systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl enable neutron-l3-agent.service
systemctl start neutron-l3-agent.service
systemctl status neutron-l3-agent.service


#systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service  neutron-l3-agent.service
#systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service  neutron-l3-agent.service
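## A quick check on the controller: the metadata, DHCP, linuxbridge and L3 agents should all show as alive before moving on to the compute node:

source .admin_openrc.sh
openstack network agent list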

#### Configure neutron on the compute node
 

ssh compute1 "yum install openstack-neutron-linuxbridge ebtables ipset -y "

### Configure /etc/neutron/neutron.conf
 

ssh compute1 "
cp /etc/neutron/neutron.conf{,.bk};
##[DEFAULT];
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url  rabbit://openstack:RABBIT_PASS@controller;
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy  keystone;
##[keystone_authtoken];
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri  http://controller:5000;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url  http://controller:35357;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers  controller:11211;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type  password;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name  default;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name  default;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name  service;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username  neutron;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password  NEUTRON_PASS;
##[oslo_concurrency];
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path  /var/lib/neutron/tmp;
egrep -v '#|^$' /etc/neutron/neutron.conf "

####

### Configure /etc/neutron/plugins/ml2/linuxbridge_agent.ini

ssh compute1 "
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bk};
##[linux_bridge];
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings  provider:bond1;
##[vxlan];
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan  true;
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip  13.13.59.18;
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population  true;
##[securitygroup];
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group  true;
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver  neutron.agent.linux.iptables_firewall.IptablesFirewallDriver;
egrep -v '#|^$' /etc/neutron/plugins/ml2/linuxbridge_agent.ini "

### Add the neutron settings to nova.conf on the compute node
 

ssh compute1 "cp /etc/nova/nova.conf{,.add_neutron_before};
####[neutron];
openstack-config --set /etc/nova/nova.conf neutron url  http://controller:9696;
openstack-config --set /etc/nova/nova.conf neutron auth_url  http://controller:35357;
openstack-config --set /etc/nova/nova.conf neutron auth_type  password;
openstack-config --set /etc/nova/nova.conf neutron project_domain_name  default;
openstack-config --set /etc/nova/nova.conf neutron user_domain_name  default;
openstack-config --set /etc/nova/nova.conf neutron region_name  RegionOne;
openstack-config --set /etc/nova/nova.conf neutron project_name  service;
openstack-config --set /etc/nova/nova.conf neutron username  neutron;
openstack-config --set /etc/nova/nova.conf neutron password  NEUTRON_PASS;
egrep -v '#|^$' /etc/nova/nova.conf "

#### Start the compute node services
 

ssh compute1 "
systemctl restart openstack-nova-compute.service;
sleep 5;
systemctl status openstack-nova-compute.service;
systemctl enable neutron-linuxbridge-agent.service;
systemctl start neutron-linuxbridge-agent.service;
sleep 5;
systemctl status neutron-linuxbridge-agent.service "

##### Verify the installation from the controller node
 

###
source .admin_openrc.sh
openstack network agent list

################################################################################################
############################## Install the Dashboard component ##############################
################################################################################################

## Install on the controller node

yum install openstack-dashboard -y

#### Configure local_settings
 

cp /etc/openstack-dashboard/local_settings{,.bk}

sed -i 's#_member_#user#g' /etc/openstack-dashboard/local_settings
sed -i 's#OPENSTACK_HOST = "127.0.0.1"#OPENSTACK_HOST = "controller"#'  /etc/openstack-dashboard/local_settings
sed -i "/ALLOWED_HOSTS/cALLOWED_HOSTS = ['*', ]" /etc/openstack-dashboard/local_settings
sed -i 's#UTC#Asia/Shanghai#g' /etc/openstack-dashboard/local_settings
sed -i 's#%s:5000/v2.0#%s:5000/v3#' /etc/openstack-dashboard/local_settings
sed -i '/ULTIDOMAIN_SUPPORT/cOPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True' /etc/openstack-dashboard/local_settings
sed -i "s@^#OPENSTACK_KEYSTONE_DEFAULT@OPENSTACK_KEYSTONE_DEFAULT@" /etc/openstack-dashboard/local_settings

### Find the following settings and uncomment them:
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 2,
}

SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': 'controller:11211',
    }
}

## Restart the services
 

systemctl restart httpd.service memcached.service

### Log in to the web UI at http://controller/dashboard
# If login fails with an error, add the following to /etc/httpd/conf.d/openstack-dashboard.conf:
# WSGIApplicationGroup %{GLOBAL}
 

echo "
WSGIApplicationGroup %{GLOBAL}
" >> /etc/httpd/conf.d/openstack-dashboard.conf

## Then restart the httpd service

################################################################################################
############################## Install the Cinder component ##############################
################################################################################################

### Set up the storage node (13.13.59.24, ceph24)

yum install lvm2 device-mapper-persistent-data -y
systemctl enable lvm2-lvmetad.service
systemctl start lvm2-lvmetad.service
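## The official guide also creates the LVM volume group that the [lvm] backend below refers to, and installs the cinder packages on the storage node. A sketch, assuming the data disk is /dev/sdb (adjust to the real device):

pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb
yum install openstack-cinder targetcli python-keystone -y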

## Configure the storage node service
 

cp /etc/cinder/cinder.conf{,.bk}
##[database]
openstack-config --set /etc/cinder/cinder.conf database connection  mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
##[DEFAULT]
openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url  rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy  keystone
openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip  13.13.59.24
openstack-config --set /etc/cinder/cinder.conf DEFAULT enabled_backends  lvm
openstack-config --set /etc/cinder/cinder.conf DEFAULT glance_api_servers  http://controller:9292
##[keystone_authtoken]
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri  http://controller:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url  http://controller:35357
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers  controller:11211
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type  password
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name  default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name  default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name  service
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username  cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password  CINDER_PASS
###[lvm]
openstack-config --set /etc/cinder/cinder.conf lvm volume_driver  cinder.volume.drivers.lvm.LVMVolumeDriver
openstack-config --set /etc/cinder/cinder.conf lvm volume_group  cinder-volumes
openstack-config --set /etc/cinder/cinder.conf lvm iscsi_protocol  iscsi
openstack-config --set /etc/cinder/cinder.conf lvm iscsi_helper  lioadm
##[oslo_concurrency]
openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path  /var/lib/cinder/tmp
egrep -v '#|^$' /etc/cinder/cinder.conf

### Start the services

systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service
systemctl status openstack-cinder-volume.service target.service


################################################################################################
### Install cinder on the controller node ################################################################
################################################################################################

## Set up the database

 

mysql -uroot -pTest@2018 -e "
CREATE DATABASE cinder; \
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'CINDER_DBPASS'; \
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'CINDER_DBPASS'; "

mysql -ucinder -pCINDER_DBPASS -e "show databases;"

### Create the keystone credentials

 

source .admin_openrc.sh

openstack user create --domain default --password=CINDER_PASS cinder
openstack role add --project service --user cinder admin
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s


## Install the cinder package

 

yum install openstack-cinder -y

## Configure cinder.conf

 

cp /etc/cinder/cinder.conf{,.bk}
##[database]
openstack-config --set /etc/cinder/cinder.conf database connection  mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
##[DEFAULT]
openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url  rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy  keystone
openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip  13.13.59.10
##[keystone_authtoken]
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri  http://controller:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url  http://controller:35357
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers  controller:11211
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type  password
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name  default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name  default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name  service
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username  cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password  CINDER_PASS
##[oslo_concurrency]
openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path  /var/lib/cinder/tmp
egrep -v '#|^$' /etc/cinder/cinder.conf

### Sync the database

 

su -s /bin/sh -c "cinder-manage db sync" cinder

### Configure the nova component to use the cinder service

 

cp /etc/nova/nova.conf{,.add_cinder_before}
openstack-config --set /etc/nova/nova.conf cinder os_region_name  RegionOne

### Start the services

 

systemctl restart openstack-nova-api.service

systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl status openstack-cinder-api.service openstack-cinder-scheduler.service

### Create a test volume

 

openstack volume create --size 1 volume1
openstack volume list

#### Launch an instance
source .admin_openrc.sh
## Create the provider network

 

openstack network create  --share --external   --provider-physical-network provider   --provider-network-type flat provider
openstack subnet create --network provider --allocation-pool start=13.13.60.2,end=13.13.60.250 --dns-nameserver 202.106.0.20 --gateway 13.13.59.1 --subnet-range 13.13.59.0/21 provider

## Create the self-service network

 

openstack network create selfservice
openstack subnet create --network selfservice --dns-nameserver 202.106.0.20 --gateway 172.16.1.1 --subnet-range 172.16.1.0/24 selfservice

## Create the router

 

openstack router create router
neutron router-interface-add router selfservice
neutron router-gateway-set router provider

## Verify the configuration

 

ip netns
neutron router-port-list router
ping -c 4 <router-gateway-IP>   # substitute the router's provider gateway address from the port list above

## Create a flavor, key pair, and security group rules

 

 

 

openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
openstack keypair list
openstack security group rule create --proto icmp default
openstack security group rule create --proto tcp --dst-port 22 default
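## The instance can also be launched from the CLI instead of the dashboard. A sketch following the official guide; SELFSERVICE_NET_ID and FLOATING_IP are placeholders to fill in from the command output:

openstack network list
openstack server create --flavor m1.nano --image cirros --nic net-id=SELFSERVICE_NET_ID --security-group default --key-name mykey selfservice-instance
openstack floating ip create provider
openstack server add floating ip selfservice-instance FLOATING_IP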

 

### Log in to the web dashboard and create an instance
#### The instance is created successfully
#### Open SSH and ICMP in the default security group
### The instance can then be pinged

 

 

 

 

 ping -c 4 13.13.60.6

64 bytes from 13.13.60.6: icmp_seq=1 ttl=63 time=0.540 ms
64 bytes from 13.13.60.6: icmp_seq=2 ttl=63 time=0.546 ms
64 bytes from 13.13.60.6: icmp_seq=3 ttl=63 time=0.596 ms
64 bytes from 13.13.60.6: icmp_seq=5 ttl=63 time=0.556 ms
64 bytes from 13.13.60.6: icmp_seq=6 ttl=63 time=0.549 ms

 

#### From the controller node, the instance's floating IP is directly reachable, and the instance can ping the external network

################################################################################################################
####### Integrating Ceph with OpenStack ######################################################################
################################################################################################################
 
### Integrate Ceph with Glance

## Install the package on the glance node

 

 

 sudo yum install -y python-rbd 

### Set up the user

 

mkdir /etc/ceph

#### Create user Cephx (password: Cephx)

 

sudo useradd Cephx
sudo passwd Cephx

### Grant sudo privileges (run as root)

 

cat << EOF >/etc/sudoers.d/Cephx
Cephx ALL = (root) NOPASSWD:ALL
Defaults:Cephx !requiretty
EOF

### On the ceph admin node, create the images pool and the glance keyring

 

##sudo ceph osd pool create images 128
#sudo ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' -o /etc/ceph/ceph.client.glance.keyring
#scp /etc/ceph/ceph.conf root@controller:/etc/ceph
#scp /etc/ceph/ceph.client.glance.keyring root@controller:/etc/ceph
#sudo chgrp glance /etc/ceph/ceph.client.glance.keyring
#sudo chmod 0640 /etc/ceph/ceph.client.glance.keyring

## Back up the glance-api.conf configuration

 

egrep -v '#|^$' /etc/glance/glance-api.conf

cp /etc/glance/glance-api.conf{,.add_cephforglance_before}

### Remove the old glance_store file-backend settings

 

##[glance_store]

openstack-config --del /etc/glance/glance-api.conf glance_store  stores  
openstack-config --del /etc/glance/glance-api.conf glance_store  default_store  
openstack-config --del /etc/glance/glance-api.conf glance_store  filesystem_store_datadir

### Add the glance_store Ceph settings

 

 

 

##[DEFAULT]
sudo openstack-config --set  /etc/glance/glance-api.conf DEFAULT show_image_direct_url  True
 
###[glance_store]
sudo openstack-config --set /etc/glance/glance-api.conf glance_store stores  rbd
sudo openstack-config --set /etc/glance/glance-api.conf glance_store default_store  rbd
sudo openstack-config --set /etc/glance/glance-api.conf glance_store rbd_store_pool  images
sudo openstack-config --set /etc/glance/glance-api.conf glance_store rbd_store_user  glance
sudo openstack-config --set /etc/glance/glance-api.conf glance_store rbd_store_ceph_conf  /etc/ceph/ceph.conf
sudo openstack-config --set /etc/glance/glance-api.conf glance_store rbd_store_chunk_size  8

 

### Verify the configuration

 

 

 egrep -v '#|^$' /etc/glance/glance-api.conf

### Restart the glance services

 

systemctl restart openstack-glance-api openstack-glance-registry
systemctl status openstack-glance-api openstack-glance-registry

### Upload an image to confirm the Ceph integration works

 

openstack image list
openstack image create --name "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --visibility public

####################################################################
############## Integrate Ceph with Cinder ##############
####################################################################

### On the ceph admin node
### Create the pools

 

ceph osd pool create volumes 128
##ceph osd pool create vms 128
ceph osd pool create backups 128


sudo ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' -o /etc/ceph/ceph.client.cinder.keyring
sudo ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups' -o /etc/ceph/ceph.client.cinder-backup.keyring

## On the node running cinder-volume (the block storage node)

 

yum install ceph-common -y

#### Create user Cephx (password: Cephx)

 

sudo useradd Cephx
sudo passwd Cephx

### Grant sudo privileges (run as root)

 

cat << EOF >/etc/sudoers.d/Cephx
Cephx ALL = (root) NOPASSWD:ALL
Defaults:Cephx !requiretty
EOF

#### Copy the keyrings and config to the cinder node

 

scp <ceph-admin-node>:/etc/ceph/ceph.conf /etc/ceph
scp <ceph-admin-node>:/etc/ceph/ceph.client.cinder.keyring /etc/ceph
scp <ceph-admin-node>:/etc/ceph/ceph.client.cinder-backup.keyring /etc/ceph

## Fix the permissions

 

chgrp cinder /etc/ceph/ceph.client.cinder*
chmod 0640 /etc/ceph/ceph.client.cinder*

### Configure cinder-volume

 

cp /etc/cinder/cinder.conf{,.add_ceph_before}

### Remove the earlier LVM settings

 

##[DEFAULT]
openstack-config --del /etc/cinder/cinder.conf DEFAULT enabled_backends

##[lvm]
openstack-config --del /etc/cinder/cinder.conf  lvm volume_driver
openstack-config --del /etc/cinder/cinder.conf  lvm volume_group
openstack-config --del /etc/cinder/cinder.conf  lvm iscsi_protocol
openstack-config --del /etc/cinder/cinder.conf  lvm iscsi_helper
egrep -v '#|^$' /etc/cinder/cinder.conf

## Add the Ceph backend settings

 

##[DEFAULT]
openstack-config --set /etc/cinder/cinder.conf DEFAULT enabled_backends  ceph
#[ceph]
openstack-config --set /etc/cinder/cinder.conf ceph volume_driver  cinder.volume.drivers.rbd.RBDDriver
openstack-config --set /etc/cinder/cinder.conf ceph volume_backend_name  backups
openstack-config --set /etc/cinder/cinder.conf ceph rbd_cluster_name  ceph
openstack-config --set /etc/cinder/cinder.conf ceph rbd_pool  volumes
openstack-config --set /etc/cinder/cinder.conf ceph rbd_user  cinder
openstack-config --set /etc/cinder/cinder.conf ceph rbd_ceph_conf  /etc/ceph/ceph.conf
openstack-config --set /etc/cinder/cinder.conf ceph rbd_flatten_volume_from_snapshot  false
openstack-config --set /etc/cinder/cinder.conf ceph rbd_secret_uuid   ee403c0a-ee0d-4f05-a4f8-05ecd658db2c
openstack-config --set /etc/cinder/cinder.conf ceph rbd_max_clone_depth  5
openstack-config --set /etc/cinder/cinder.conf ceph rbd_store_chunk_size  4
openstack-config --set /etc/cinder/cinder.conf ceph rados_connect_timeout  -1
openstack-config --set /etc/cinder/cinder.conf ceph rados_connection_retries  3
openstack-config --set /etc/cinder/cinder.conf ceph rados_connection_interval  5
openstack-config --set /etc/cinder/cinder.conf ceph replication_connect_timeout  5

## Review the configuration

 

egrep -v '#|^$' /etc/cinder/cinder.conf

# Restart the services

 

systemctl restart openstack-cinder-volume.service target.service
systemctl status openstack-cinder-volume.service target.service

### Create a volume to test

 

#openstack volume create --size 1 volume1
#openstack volume list
#rbd ls volumes   # look up the volume name in the pool

######
####################################################################
############## Integrate Ceph with Nova ##############
####################################################################
## Create the pool on the ceph node

 

ceph osd pool create vms 128

## Create the keyring

 

###ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=vms, allow rx pool=images' -o /etc/ceph/ceph.client.nova.keyring

## scp it to the compute node
### Install the ceph components

 

yum install python-rbd ceph-common -y

#### Copy the keyrings and config to the nova compute node

 

 

 

ceph auth get-or-create client.cinder | tee /etc/ceph/ceph.client.cinder.keyring
scp <ceph-admin-node>:/etc/ceph/ceph.conf /etc/ceph
scp <ceph-admin-node>:/etc/ceph/ceph.client.nova.keyring /etc/ceph

 

## Fix the permissions

 

 

chgrp nova /etc/ceph/ceph.client.cinder*
chmod 0640 /etc/ceph/ceph.client.cinder*

#### Get the UUID

 

uuidgen | tee /etc/ceph/nova.uuid.txt   ### we reuse cinder's UUID below instead
cat > /etc/ceph/nova.xml <<EOF
<secret ephemeral="no" private="no">
<uuid>ee403c0a-ee0d-4f05-a4f8-05ecd658db2c</uuid>
<usage type="ceph">
<name>client.cinder secret</name>
</usage>
</secret>
EOF

 virsh secret-define --file /etc/ceph/nova.xml

# virsh secret-set-value --secret 5020d7ca-14cc-4d84-b3ee-d945bf22d8eb --base64 $(cat /etc/ceph/client.nova.keyring)
virsh secret-set-value --secret ee403c0a-ee0d-4f05-a4f8-05ecd658db2c --base64 AQDGC9NaOcOTMxAA3c78/w4rlUwYzMuSZbhhJw==
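## Confirm libvirt stored the secret; the UUID must match rbd_secret_uuid in cinder.conf and nova.conf:

virsh secret-list
virsh secret-get-value ee403c0a-ee0d-4f05-a4f8-05ecd658db2c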

#### Configure the nova service

cp /etc/nova/nova.conf{,.add_ceph_before}

###[libvirt]

openstack-config --set /etc/nova/nova.conf libvirt hw_disk_discard  unmap
openstack-config --set /etc/nova/nova.conf libvirt images_type  rbd
openstack-config --set /etc/nova/nova.conf libvirt images_rbd_pool  vms
openstack-config --set /etc/nova/nova.conf libvirt images_rbd_ceph_conf  /etc/ceph/ceph.conf
openstack-config --set /etc/nova/nova.conf libvirt rbd_user  nova
openstack-config --set /etc/nova/nova.conf libvirt rbd_secret_uuid  ee403c0a-ee0d-4f05-a4f8-05ecd658db2c
openstack-config --set /etc/nova/nova.conf libvirt disk_cachemodes  \"network=writeback\"
openstack-config --set /etc/nova/nova.conf libvirt inject_password  false
openstack-config --set /etc/nova/nova.conf libvirt inject_key  false
openstack-config --set /etc/nova/nova.conf libvirt inject_partition  -2
openstack-config --set /etc/nova/nova.conf libvirt live_migration_flag  \"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"

## Check the configuration

egrep -v '#|^$' /etc/nova/nova.conf

### Restart the nova service

systemctl restart openstack-nova-compute
systemctl status openstack-nova-compute
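## To confirm nova now places ephemeral disks in Ceph, boot a test instance and list the vms pool; its disk object should appear (SELFSERVICE_NET_ID is a placeholder; add --id nova to rbd on the compute node):

openstack server create --flavor m1.nano --image cirros --nic net-id=SELFSERVICE_NET_ID --key-name mykey ceph-test
rbd ls vms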

#### The integration is now complete
###### End of this tutorial
 
