Building OpenStack Manually (Ocata Release)

Basic environment preparation

In VMware, create two CentOS 7 virtual machines (2 GB RAM, 100 GB disk, NAT networking):
controller:172.16.100.10
compute:172.16.100.20


  • Set up the Aliyun Yum repositories

controller node & compute node

[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# ^stop^disable
[root@controller ~]# setenforce 0
[root@controller ~]# sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
[root@localhost ~]# rm -rf /etc/yum.repos.d/*
[root@localhost ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo  
[root@localhost ~]# cat >>/etc/yum.repos.d/CentOS-Base.repo  << EOF
[openstack-ocata]
name=openstack-ocata
baseurl=https://mirrors.aliyun.com/centos/7.5.1804/cloud/x86_64/openstack-ocata/
gpgcheck=0
enabled=1
[kvm-common]
name=kvm-common
baseurl=https://mirrors.aliyun.com/centos/7.5.1804/virt/x86_64/kvm-common/
gpgcheck=0
enabled=1
EOF
[root@localhost ~]# yum update     
[root@localhost ~]# yum install python-openstackclient openstack-selinux  
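As a quick optional check before updating, confirm that the two new repositories are actually enabled (a verification sketch, not part of the original procedure):
[root@localhost ~]# yum repolist enabled | grep -E 'openstack-ocata|kvm-common'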
  • Configure the hostname

controller node & compute node

[root@localhost ~]# hostnamectl set-hostname controller    # on the controller node
[root@localhost ~]# hostnamectl set-hostname compute       # on the compute node
[root@localhost ~]# vim /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.100.10 controller
172.16.100.20 compute
  • Synchronize time

controller node

[root@localhost ~]# yum -y install chrony
[root@localhost ~]# vim /etc/chrony.conf
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp3.aliyun.com iburst
allow 172.16.100.0/24
[root@localhost ~]# systemctl start chronyd
[root@localhost ~]# systemctl enable chronyd

compute node

[root@compute ~]# yum -y install chrony
[root@compute ~]# vi /etc/chrony.conf
server controller iburst
[root@compute ~]# systemctl start chronyd.service
[root@compute ~]# ^start^enable
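To confirm synchronization is actually working, chronyc can list each node's sources (an optional verification; the compute node should show controller as its only source):
[root@controller ~]# chronyc sources
[root@compute ~]# chronyc sources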
  • Message queue
    controller node
[root@controller ~]# yum -y install rabbitmq-server    
[root@controller ~]# systemctl start rabbitmq-server.service       # listens on port 5672
[root@controller ~]# ^start^enable
[root@controller ~]#  rabbitmqctl add_user openstack 000000
[root@controller ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
[root@controller ~]# rabbitmq-plugins list                         # list available rabbitmq plugins
[root@controller ~]# rabbitmq-plugins enable rabbitmq_management   # enable the management plugin, listens on port 15672
[root@controller ~]# systemctl restart rabbitmq-server.service     

In a browser, open ip:15672 (default credentials guest:guest), go to the Admin page, and set the following permissions:

Name        Tags            Can access virtual hosts    Has password
guest       administrator   /                           ●
openstack   administrator   /                           ●
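Note that recent RabbitMQ releases only let the built-in guest account log in from localhost, so reaching the management UI from another machine may require the openstack user instead. The administrator tag shown in the table can also be granted from the CLI rather than the web UI (a sketch using rabbitmqctl):
[root@controller ~]# rabbitmqctl set_user_tags openstack administrator
[root@controller ~]# rabbitmqctl list_users                             # verify the tag was applied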
  • SQL database
[root@controller ~]# yum -y install mariadb mariadb-server  python2-PyMySQL
[root@controller ~]# vim /etc/my.cnf
[mysqld]
default-storage-engine = innodb
innodb_file_per_table                            # one tablespace file per table
max_connections=10000                            # maximum number of connections
collation-server = utf8_general_ci               # default collation
character-set-server = utf8                      # default character set
[root@controller ~]# systemctl start mariadb
[root@controller ~]# ^start^enable
[root@controller ~]# mysql_secure_installation 
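To confirm the my.cnf options took effect, query the running server (optional check; this assumes the root password was set to 000000 during mysql_secure_installation, matching the rest of this guide):
[root@controller ~]# mysql -uroot -p000000 -e "show variables like 'max_connections';"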
  • Memcached
[root@controller ~]# yum -y install memcached python-memcached
[root@controller ~]# vim /etc/sysconfig/memcached 
OPTIONS="-l 127.0.0.1,::1,controller"
[root@controller ~]# systemctl start memcached.service      # listens on port 11211
[root@controller ~]# ^start^enable
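Optionally verify that memcached is listening on the expected addresses and port (ss is part of the default CentOS 7 install):
[root@controller ~]# ss -tlnp | grep 11211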

Keystone Identity Service

controller node

[root@controller ~]# mysql -uroot -p000000 -e "create database keystone;"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on keystone.* to keystone@'localhost' identified by '000000';"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on keystone.* to keystone@'%' identified by '000000';"
[root@controller ~]# yum install openstack-keystone httpd mod_wsgi
[root@controller ~]# vim /etc/keystone/keystone.conf 
[DEFAULT]
admin_token = d5ce3b84915c138c21fa
verbose = true                  # verbose logging
[database]
connection = mysql+pymysql://keystone:000000@controller/keystone
[memcache]
servers = localhost:11211
[token]
provider = fernet
driver = memcache       # store tokens in memcached
[revoke]
driver = sql
[root@controller ~]# grep '^[a-z]' /etc/keystone/keystone.conf 
[root@controller ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
[root@controller ~]# mysql -ukeystone -p000000 -e "use keystone;show tables;"
[root@controller ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
[root@controller ~]# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
[root@controller ~]# keystone-manage bootstrap --bootstrap-password 000000   --bootstrap-admin-url http://controller:35357/v3/   --bootstrap-internal-url http://controller:5000/v3/   --bootstrap-public-url http://controller:5000/v3/  --bootstrap-region-id RegionOne
[root@controller ~]# vim /etc/httpd/conf/httpd.conf 
ServerName controller
[root@controller ~]# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
[root@controller ~]# systemctl start httpd.service             公共端口5000,管理端口35357
[root@controller ~]# ^start^enable
[root@controller ~]# vim /etc/keystone/admin-openrc
export OS_USERNAME=admin
export OS_PASSWORD=000000
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_DOMAIN_NAME=default
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
[root@controller ~]# vim /etc/keystone/demo-openrc
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=000000
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
[root@controller ~]# source /etc/keystone/admin-openrc
[root@controller ~]# openstack endpoint list
[root@controller ~]# openstack project create --domain default   --description "Service Project" service
[root@controller ~]# openstack project create --domain default   --description "Demo Project" demo
[root@controller ~]# openstack user create --domain default   --password 000000 demo
[root@controller ~]# openstack role create user
[root@controller ~]# openstack role add --project demo --user demo user
[root@controller ~]# unset OS_AUTH_URL OS_PASSWORD
[root@controller ~]# openstack --os-auth-url http://controller:35357/v3   --os-project-domain-name default --os-user-domain-name default   --os-project-name admin --os-username admin token issue  # request a token to verify the admin account
[root@controller ~]# openstack --os-auth-url http://controller:5000/v3   --os-project-domain-name default --os-user-domain-name default   --os-project-name demo --os-username demo token issue
[root@controller ~]# source /etc/keystone/admin-openrc
[root@controller ~]# openstack token issue

Glance Image Service

controller node

[root@controller ~]# mysql -uroot -p000000 -e "create database glance;"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on glance.* to glance@'localhost' identified by '000000';"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on glance.* to glance@'%' identified by '000000';"
[root@controller ~]# openstack user create --domain default --password 000000 glance
[root@controller ~]# openstack role add --project service --user glance admin
[root@controller ~]# openstack service create --name glance   --description "OpenStack Image" image
[root@controller ~]# openstack endpoint create --region RegionOne   image public http://controller:9292
[root@controller ~]# openstack endpoint create --region RegionOne   image internal http://controller:9292
[root@controller ~]# openstack endpoint create --region RegionOne   image admin http://controller:9292
[root@controller ~]# yum -y install openstack-glance
[root@controller ~]# vim /etc/glance/glance-api.conf 
[database]
connection = mysql+pymysql://glance:000000@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = 000000
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[root@controller ~]# vim /etc/glance/glance-registry.conf 
[database]
connection = mysql+pymysql://glance:000000@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = 000000
[paste_deploy]
flavor = keystone
[root@controller ~]# su -s /bin/sh -c "glance-manage db_sync" glance
[root@controller ~]# systemctl enable openstack-glance-api.service   openstack-glance-registry.service
[root@controller ~]# ^enable^start
[root@controller ~]# openstack image list
[root@controller ~]# wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img                        # default credentials cirros:cubswin:)
[root@controller ~]# openstack image create "cirros"   --file cirros-0.3.5-x86_64-disk.img   --disk-format qcow2 --container-format bare   --public
[root@controller ~]# openstack image list
+--------------------------------------+--------+--------+
| ID                                   | Name   | Status |
+--------------------------------------+--------+--------+
| 01e261fe-489d-4781-ae28-286cfa8ac1e8 | cirros | active |
+--------------------------------------+--------+--------+
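Because the filesystem store was configured above, the uploaded image should also appear as a file named after its ID in the datadir (optional check):
[root@controller ~]# ls /var/lib/glance/images/
01e261fe-489d-4781-ae28-286cfa8ac1e8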

Nova Compute Service

controller node

[root@controller ~]# mysql -uroot -p000000 -e "create database nova;"
[root@controller ~]# mysql -uroot -p000000 -e "create database nova_api;"
[root@controller ~]# mysql -uroot -p000000 -e "create database nova_cell0;"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on nova.* to nova@'localhost' identified by '000000';"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on nova.* to nova@'%' identified by '000000';"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on nova_api.* to nova@'localhost' identified by '000000';"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on nova_api.* to nova@'%' identified by '000000';"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on nova_cell0.* to nova_cell0@'localhost' identified by '000000';"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on nova_cell0.* to nova_cell0@'%' identified by '000000';"
[root@controller ~]# openstack user create --domain default --password 000000 nova
[root@controller ~]# openstack role add --project service --user nova admin
[root@controller ~]# openstack service create --name nova   --description "OpenStack Compute" compute
[root@controller ~]# openstack endpoint create --region RegionOne   compute public http://controller:8774/v2.1
[root@controller ~]# openstack endpoint create --region RegionOne   compute internal http://controller:8774/v2.1
[root@controller ~]# openstack endpoint create --region RegionOne   compute admin http://controller:8774/v2.1
[root@controller ~]# openstack user create --domain default --password 000000 placement
[root@controller ~]# openstack role add --project service --user placement admin
[root@controller ~]# openstack service create --name placement --description "Placement API" placement
[root@controller ~]# openstack endpoint create --region RegionOne placement public http://controller:8778
[root@controller ~]# openstack endpoint create --region RegionOne placement internal http://controller:8778
[root@controller ~]# openstack endpoint create --region RegionOne placement admin http://controller:8778
[root@controller ~]# yum -y install openstack-nova-api openstack-nova-conductor   openstack-nova-console openstack-nova-novncproxy   openstack-nova-scheduler openstack-nova-placement-api
[root@controller ~]# vim /etc/nova/nova.conf 
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url=rabbit://openstack:000000@controller
use_neutron=true
firewall_driver=nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:000000@controller/nova_api
[database]
connection = mysql+pymysql://nova:000000@controller/nova
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = 000000
[vnc]
enabled = true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = controller
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
os_region_name = RegionOne
project_domain_name = default
project_name = service
auth_type = password
user_domain_name = default
auth_url = http://controller:35357/v3
username = placement
password = 000000
[root@controller ~]# cat >>/etc/httpd/conf.d/00-nova-placement-api.conf <<EOF
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
EOF
[root@controller ~]# systemctl restart httpd

##### Register the databases #####
[root@controller ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
357ec8f8-bf3b-4e04-8689-ccaec9556c5d
[root@controller ~]# su -s /bin/sh -c "nova-manage db sync" nova
[root@controller ~]# mysql -unova -p000000 -e "use nova;show tables;"
[root@controller ~]# nova-manage cell_v2 list_cells
[root@controller ~]# systemctl start openstack-nova-api.service   openstack-nova-consoleauth.service openstack-nova-scheduler.service   openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller ~]# ^start^enable
[root@controller ~]# nova service-list
[root@controller ~]# openstack host list
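Ocata also ships a nova-status command that checks the cells v2 and Placement API setup; running it here is a useful extra verification (not part of the original steps):
[root@controller ~]# nova-status upgrade check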

compute node

[root@compute ~]# yum install openstack-nova-compute
[root@compute yum.repos.d]# scp controller:/etc/nova/nova.conf /etc/nova/
[root@compute ~]# vim /etc/nova/nova.conf 
[database]
Delete: connection = mysql+pymysql://nova:000000@controller/nova
[api_database]
Delete: connection = mysql+pymysql://nova:000000@controller/nova_api
[vnc]
Change: vncserver_proxyclient_address = compute
Add: novncproxy_base_url = http://controller:6080/vnc_auto.html
### In VMware, enable hardware virtualization first: VM Settings > Processor > Virtualization engine > Virtualize Intel VT-x/EPT or AMD-V/RVI
[root@compute ~]# egrep -c '(vmx|svm)' /proc/cpuinfo
1
### If the returned value is >= 1, the compute node supports hardware acceleration and needs no extra configuration; otherwise make the following edit in the [libvirt] section of /etc/nova/nova.conf:
[libvirt]
virt_type = qemu
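If the crudini utility happens to be available (it is not installed by the steps above), the same edit can be applied non-interactively, which is convenient when scripting compute node setup (a sketch):
[root@compute ~]# crudini --set /etc/nova/nova.conf libvirt virt_type qemu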
[root@compute ~]# systemctl enable libvirtd.service openstack-nova-compute.service
[root@compute ~]# ^enable^start

controller node

[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
[root@controller ~]# nova service-list
[root@controller ~]# openstack hypervisor list
[root@controller ~]# openstack host list
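Rather than running discover_hosts by hand every time a compute node is added, nova's scheduler can discover new hosts periodically; if your Ocata packages include this option (verify before relying on it), it goes on the controller:
[root@controller ~]# vim /etc/nova/nova.conf
[scheduler]
discover_hosts_in_cells_interval = 300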

Neutron Networking Service

controller node

##### Create the database #####
[root@controller ~]# mysql -uroot -p000000 -e "create database neutron;"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on neutron.* to neutron@'localhost' identified by '000000';"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on neutron.* to neutron@'%' identified by '000000';"

##### Create the user and service endpoints #####
[root@controller ~]# openstack user create --domain default --password 000000 neutron
[root@controller ~]# openstack role add --project service --user neutron admin
[root@controller ~]# openstack service create --name neutron   --description "OpenStack Networking" network
[root@controller ~]# openstack endpoint create --region RegionOne   network public http://controller:9696
[root@controller ~]# openstack endpoint create --region RegionOne   network internal http://controller:9696
[root@controller ~]# openstack endpoint create --region RegionOne   network admin http://controller:9696

##### Configure the flat (provider) network service components #####
[root@controller ~]# yum install openstack-neutron openstack-neutron-ml2   openstack-neutron-linuxbridge ebtables
[root@controller ~]# vim /etc/neutron/neutron.conf 
[DEFAULT]
core_plugin = ml2          # enable the ML2 plugin and disable additional plugins
service_plugins =
transport_url = rabbit://openstack:000000@controller
auth_strategy = keystone
[database]
connection = mysql+pymysql://neutron:000000@controller/neutron
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service 
username = neutron
password = 000000
[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = 000000
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[root@controller ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini 
[ml2]
type_drivers = flat,vlan,gre,vxlan,geneve   
tenant_network_types =  flat,vlan,gre,vxlan,geneve
mechanism_drivers = linuxbridge    # enable the Linux bridge mechanism
extension_drivers = port_security      # enable the port security extension driver
[ml2_type_flat]
flat_networks = public        
[securitygroup]
enable_ipset = true             # enable ipset to increase the efficiency of security group rules

##### Configure the Linux bridge agent #####
[root@controller ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = public:eno16777728
[vxlan]
enable_vxlan = false      # disable VXLAN overlay networks
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver              # enable security groups using the Linux bridge iptables firewall driver

##### Configure the DHCP agent #####
[root@controller ~]# vim /etc/neutron/dhcp_agent.ini 
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
[root@controller ~]# vim /etc/neutron/metadata_agent.ini 
[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = 000000   # metadata host (above) and shared secret; must match the [neutron] section of nova.conf

##### Configure the Compute service to use the Networking service #####
[root@controller ~]# vim /etc/nova/nova.conf 
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 000000
service_metadata_proxy = true
metadata_proxy_shared_secret = 000000

[root@controller ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
[root@controller ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
[root@controller ~]# systemctl restart openstack-nova-api.service
[root@controller ~]# systemctl start neutron-server.service   neutron-linuxbridge-agent.service neutron-dhcp-agent.service   neutron-metadata-agent.service
[root@controller ~]# ^start^enable

##### Verify the service #####
[root@controller ~]# openstack network agent list
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 849a786c-6054-4368-b50c-62fe846ecb3e | Linux bridge agent | controller | None              | True  | UP    | neutron-linuxbridge-agent |
| d597af76-5708-4135-b22c-2e2aa3ce55e1 | DHCP agent         | controller | nova              | True  | UP    | neutron-dhcp-agent        |
| f63961f6-dbc7-4ab9-bdd6-266cf6df9636 | Metadata agent     | controller | None              | True  | UP    | neutron-metadata-agent    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+

compute node

[root@compute ~]# yum -y install openstack-neutron-linuxbridge ebtables ipset
[root@compute ~]# vim /etc/neutron/neutron.conf 
[DEFAULT]
transport_url = rabbit://openstack:000000@controller
auth_strategy = keystone
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 000000
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

##### Configure the Linux bridge agent #####
[root@compute ~]# scp controller:/etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/

##### Configure the Compute service to use the Networking service #####
[root@compute ~]# vim /etc/nova/nova.conf 
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 000000

[root@compute ~]# systemctl restart openstack-nova-compute.service
[root@compute ~]# systemctl start neutron-linuxbridge-agent.service
[root@compute ~]# ^start^enable

controller node

##### Verify the services #####
[root@controller ~]# openstack network agent list
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 702a7d99-47bf-4055-a649-120e4223fec8 | Linux bridge agent | compute    | None              | True  | UP    | neutron-linuxbridge-agent |
| 849a786c-6054-4368-b50c-62fe846ecb3e | Linux bridge agent | controller | None              | True  | UP    | neutron-linuxbridge-agent |
| d597af76-5708-4135-b22c-2e2aa3ce55e1 | DHCP agent         | controller | nova              | True  | UP    | neutron-dhcp-agent        |
| f63961f6-dbc7-4ab9-bdd6-266cf6df9636 | Metadata agent     | controller | None              | True  | UP    | neutron-metadata-agent    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
[root@controller ~]# netstat -lnpute
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address
tcp        0      0 172.16.100.10:3306         mysql
tcp        0      0 172.16.100.10:11211        memcached
tcp        0      0 0.0.0.0:9292               glance-api
tcp        0      0 0.0.0.0:9696               neutron-server
tcp        0      0 0.0.0.0:6080               nova-novncproxy
tcp        0      0 0.0.0.0:8774               nova-api
tcp        0      0 0.0.0.0:8775               nova-metadata-api
tcp        0      0 0.0.0.0:9191               glance-registry
tcp6       0      0 :::5672                    rabbitmq
tcp6       0      0 :::5000                    keystone (public)
tcp6       0      0 :::35357                   keystone (admin)

Dashboard (Horizon)

controller node

[root@controller ~]# yum install openstack-dashboard
[root@controller ~]# vim /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "controller"     keystone服务IP
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST         #启用第3版认证API#
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"       #通过仪表盘创建的用户默认角色user#
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'default'          #创建普通用户是默认域#
ALLOWED_HOSTS = ['*',]
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'           # use memcached for session storage
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'controller:11211',
    },
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True         # enable multi-domain support
OPENSTACK_API_VERSIONS = {           # API versions
    "identity": 3,
    "image": 2,
    "volume": 2,
} 
OPENSTACK_NEUTRON_NETWORK = {         # disable layer-3 networking services
    'enable_router': False,
    'enable_quotas': False,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': False,
    'enable_firewall': False,
    'enable_vpn': False,
    'enable_fip_topology_check': False,
TIME_ZONE = "Asia/Shanghai"      #配置时区#

Cinder Block Storage

controller node

##### Create the database #####
[root@controller ~]# mysql -uroot -p000000 -e "create database cinder;"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on cinder.* to cinder@'localhost' identified by '000000';"
[root@controller ~]# mysql -uroot -p000000 -e "grant all privileges on cinder.* to cinder@'%' identified by '000000';"
[root@controller ~]# mysql -uroot -p000000 -e "show databases;"

##### Create the user #####
[root@controller ~]# openstack user create --domain default --password 000000 cinder
[root@controller ~]# openstack role add --project service --user cinder admin

##### Create the service entities #####
[root@controller ~]# openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
[root@controller ~]# openstack service create --name cinderv3   --description "OpenStack Block Storage" volumev3

##### Create the service API endpoints #####
[root@controller ~]# openstack endpoint create --region RegionOne   volumev2 public http://controller:8776/v2/%\(project_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne   volumev2 internal http://controller:8776/v2/%\(project_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne   volumev2 admin http://controller:8776/v2/%\(project_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne   volumev3 public http://controller:8776/v3/%\(project_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne   volumev3 internal http://controller:8776/v3/%\(project_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne   volumev3 admin http://controller:8776/v3/%\(project_id\)s

##### Install and configure the components #####
[root@controller ~]# yum install openstack-cinder
[root@controller ~]# vim /etc/cinder/cinder.conf 
[DEFAULT]
transport_url = rabbit://openstack:000000@controller
auth_strategy = keystone
[database]
connection = mysql+pymysql://cinder:000000@controller/cinder
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 000000
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

##### Initialize the database #####
[root@controller ~]# su -s /bin/sh -c "cinder-manage db sync" cinder

##### Configure the Compute service to use block storage #####
[root@controller ~]# vim /etc/nova/nova.conf 
[cinder]
os_region_name = RegionOne
[root@controller ~]# systemctl restart openstack-nova-api.service
[root@controller ~]# systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
[root@controller ~]# ^enable^start

compute node (a second virtual disk, /dev/sdb, is assumed to have been added to this VM for cinder volumes)

[root@compute ~]# yum install lvm2
[root@compute ~]# systemctl start lvm2-lvmetad.service
[root@compute ~]# ^start^enable
[root@compute ~]# pvcreate /dev/sdb
[root@compute ~]# vgcreate cinder-volumes /dev/sdb
[root@compute ~]# vim /etc/lvm/lvm.conf 
devices {
        filter = [ "a/sda/", "a/sdb/", "r/.*/"]       # accept sda and sdb, reject every other device
}
[root@compute ~]# yum -y install openstack-cinder targetcli python-keystone
[root@compute ~]# scp controller:/etc/cinder/cinder.conf /etc/cinder/
[root@compute ~]# vim /etc/cinder/cinder.conf 
[DEFAULT]
Add: enabled_backends = lvm
Add: glance_api_servers = http://controller:9292
Add: iscsi_ip_address = compute
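The upstream Ocata guide also defines an explicit [lvm] backend section on the storage node; this deployment appears to rely on the built-in defaults, but for reference the explicit configuration looks roughly like this (iscsi_helper = lioadm matches the targetcli package installed above):
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm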
[root@compute ~]# systemctl start openstack-cinder-volume.service target.service
[root@compute ~]# ^start^enable
[root@controller ~]# openstack volume service list
+------------------+-------------+------+---------+-------+----------------------------+
| Binary           | Host        | Zone | Status  | State | Updated At                 |
+------------------+-------------+------+---------+-------+----------------------------+
| cinder-scheduler | controller  | nova | enabled | up    | 2018-06-21T07:13:23.000000 |
| cinder-volume    | compute@lvm | nova | enabled | up    | 2018-06-21T07:13:21.000000 |
+------------------+-------------+------+---------+-------+----------------------------+
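As a final end-to-end check, a small test volume can be created and removed from the controller (a sketch; the volume name is arbitrary):
[root@controller ~]# source /etc/keystone/demo-openrc
[root@controller ~]# openstack volume create --size 1 test-volume
[root@controller ~]# openstack volume list
[root@controller ~]# openstack volume delete test-volume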