Adding the Image service - preparation (controller)


The Image service, also known as glance, is the component used to manage images; we use images to install operating systems on instances. Glance also lets users manage their own custom images.

Create the glance database and user

mysql -uroot -ptn1Pi6Ytm

> CREATE DATABASE glance;

> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost'    IDENTIFIED BY 'Zznky4tP0';

> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%'    IDENTIFIED BY 'Zznky4tP0';

Source the admin-openrc.sh script:   source admin-openrc.sh
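
For reference, a typical admin-openrc.sh looks roughly like the following; ADMIN_PASS is a placeholder, use the admin password you set when configuring keystone:

export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=admin
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3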

Create the glance user (password: hf8LX9bow)

openstack user create --domain default --password-prompt glance

Add the admin role to the glance user in the service project

openstack role add --project service --user glance admin

Create the glance service entity

openstack service create --name glance   --description "OpenStack Image service" image

Create the Image service API endpoints

openstack endpoint create --region RegionOne   image public http://controller:9292

openstack endpoint create --region RegionOne   image internal http://controller:9292

openstack endpoint create --region RegionOne   image admin http://controller:9292
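
Optionally, confirm that the service entity and its three endpoints were registered, for example:

openstack service list
openstack endpoint list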


Adding the Image service - installation and configuration (controller)


Install the packages

yum install -y openstack-glance python-glance python-glanceclient

Edit the configuration file

vim /etc/glance/glance-api.conf   //change or add the following


[database] 

connection = mysql://glance:Zznky4tP0@controller/glance


[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = glance

password = hf8LX9bow

[paste_deploy]

flavor = keystone


[glance_store]

default_store = file

filesystem_store_datadir = /var/lib/glance/images/


[DEFAULT]

notification_driver = noop

verbose=True


vim /etc/glance/glance-registry.conf  //change or add the following

[DEFAULT]

notification_driver = noop

verbose=True


[database] 

connection = mysql://glance:Zznky4tP0@controller/glance

[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = glance

password = hf8LX9bow

[paste_deploy]

flavor = keystone


Populate the glance database

su -s /bin/sh -c "glance-manage db_sync" glance

Start the services

systemctl enable openstack-glance-api.service   openstack-glance-registry.service

systemctl start openstack-glance-api.service   openstack-glance-registry.service
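
To verify that the services came up, you can check their status and confirm that glance-api is listening on port 9292, for example:

systemctl status openstack-glance-api.service openstack-glance-registry.service
ss -lntp | grep 9292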



Adding the Image service - verify operation (controller)

(1) Add the environment variable

echo "export OS_IMAGE_API_VERSION=2"   | tee -a admin-openrc.sh demo-openrc.sh#此句代表将变量内容其中的一句追加到脚本里面


(2) Source admin-openrc.sh

source admin-openrc.sh


(3) Download an image

wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
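
If qemu-img is available on the host, you can inspect the downloaded file to confirm it is a qcow2 image, which is why --disk-format qcow2 is used in the next step:

qemu-img info cirros-0.3.4-x86_64-disk.img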


(4) Upload the downloaded image to the Image service

glance image-create --name "cirros" \

 --file cirros-0.3.4-x86_64-disk.img \

 --disk-format qcow2 --container-format bare \

 --visibility public --progress


You should now see a file under /var/lib/glance/images/; this is the image you just uploaded, and you will notice that its file name matches the image ID.

Use the command  glance image-list  to view the image list.
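
For example, to compare the stored file with the image ID shown in the list (the exact ID will differ on your system):

glance image-list
ls -lh /var/lib/glance/images/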


Adding Compute - preparation (controller)

Compute, also known as nova, is the compute controller of OpenStack. All activities in the lifecycle of instances are handled by nova, which makes it a platform responsible for managing compute resources, networking, authentication, and the required scalability. Nova itself, however, provides no virtualization capability; instead it uses the libvirt API to interact with the supported hypervisors (KVM, Xen, VMware, and so on).

Create the nova database and the nova user

mysql -uroot -ptn1Pi6Ytm

> CREATE DATABASE nova;

> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost'    IDENTIFIED BY 'RYgv0rg7p';

> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%'    IDENTIFIED BY 'RYgv0rg7p';

Load the environment variables:   source admin-openrc.sh

Create the nova user (password: hsSNsqc43)

openstack user create --domain default --password-prompt nova

Add the admin role to the nova user

openstack role add --project service --user nova admin

Create the nova service entity

openstack service create --name nova   --description "OpenStack Compute" compute


Create the API endpoints

openstack endpoint create --region RegionOne  compute public http://controller:8774/v2/%\(tenant_id\)s

openstack endpoint create --region RegionOne  compute internal http://controller:8774/v2/%\(tenant_id\)s

openstack endpoint create --region RegionOne  compute admin http://controller:8774/v2/%\(tenant_id\)s



Adding Compute - install and configure packages (controller)

yum install openstack-nova-api openstack-nova-cert   openstack-nova-conductor openstack-nova-console \

 openstack-nova-novncproxy openstack-nova-scheduler  python-novaclient -y

Edit the configuration file

vim  /etc/nova/nova.conf  //change or add the following

[database]

connection = mysql://nova:RYgv0rg7p@controller/nova


[DEFAULT]

rpc_backend=rabbit

my_ip=192.168.16.111

auth_strategy=keystone

network_api_class = nova.network.neutronv2.api.API

security_group_api = neutron

linuxnet_interface_driver = nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver

firewall_driver = nova.virt.firewall.NoopFirewallDriver

enabled_apis=osapi_compute,metadata

verbose=true


[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = nova

password = hsSNsqc43


[oslo_messaging_rabbit]

rabbit_host = controller

rabbit_userid = openstack

rabbit_password = o3NXovnz5


[vnc]

vncserver_listen = $my_ip

vncserver_proxyclient_address = $my_ip

[glance]

host = controller

[oslo_concurrency]

lock_path = /var/lib/nova/tmp

Populate the nova database (create the tables)

su -s /bin/sh -c "nova-manage db sync" nova

Start the services

systemctl enable openstack-nova-api.service \

 openstack-nova-cert.service openstack-nova-consoleauth.service \

 openstack-nova-scheduler.service openstack-nova-conductor.service \

 openstack-nova-novncproxy.service

systemctl start openstack-nova-api.service \

 openstack-nova-cert.service openstack-nova-consoleauth.service \

 openstack-nova-scheduler.service openstack-nova-conductor.service \

 openstack-nova-novncproxy.service


Adding Compute - install and configure packages (compute)

Install the nova-compute package

yum install -y openstack-nova-compute sysfsutils


Edit the configuration file

vim  /etc/nova/nova.conf  //change or add the following

[DEFAULT]

rpc_backend = rabbit

auth_strategy = keystone

my_ip = 192.168.16.112

network_api_class = nova.network.neutronv2.api.API

security_group_api = neutron

linuxnet_interface_driver = nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver

firewall_driver = nova.virt.firewall.NoopFirewallDriver

verbose=true

[oslo_messaging_rabbit]

rabbit_host = controller

rabbit_userid = openstack

rabbit_password = o3NXovnz5


[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = nova

password = hsSNsqc43

[vnc]

enabled = True

vncserver_listen = 0.0.0.0

vncserver_proxyclient_address = $my_ip

novncproxy_base_url = http://controller:6080/vnc_auto.html

[glance]

host = controller

[oslo_concurrency]

lock_path = /var/lib/nova/tmp


Use the following command to check whether the machine's CPU supports hardware virtualization

egrep -c '(vmx|svm)' /proc/cpuinfo

If the number is greater than 0, hardware virtualization is supported and no change is needed; if it is 0, it is not supported and you must edit the configuration file as follows (a combined check-and-set sketch follows the snippet below).

vim  /etc/nova/nova.conf  //edit

[libvirt]

virt_type = qemu
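
A minimal sketch that combines the check and the change, assuming the openstack-config tool from the openstack-utils package is installed (otherwise edit the file by hand as shown above):

if [ "$(egrep -c '(vmx|svm)' /proc/cpuinfo)" -eq 0 ]; then
    # no hardware virtualization available: fall back to plain qemu emulation
    openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
fi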


Start the services

systemctl enable libvirtd.service openstack-nova-compute.service

systemctl start libvirtd.service openstack-nova-compute.service  


Adding Compute - verify operation (controller)

Source the script

source admin-openrc.sh


List the service components

nova service-list

There should be five in total: nova-consoleauth, nova-conductor, nova-scheduler, nova-cert and nova-compute


List the API endpoints; there should be nine groups in total: three for nova, three for glance and three for keystone

nova endpoints


If you see the warning

WARNING: nova has no endpoint in ! Available endpoints for this service:

you can ignore it, or edit admin-openrc.sh and add a line: export OS_REGION_NAME=RegionOne
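
For example, following the same pattern used for OS_IMAGE_API_VERSION above:

echo "export OS_REGION_NAME=RegionOne" | tee -a admin-openrc.sh demo-openrc.sh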


List the images

nova image-list


Adding Networking - preparation (controller)


Networking, also known as Neutron, is an essential OpenStack component. It implements network virtualization, letting us emulate network devices such as routers, switches and network interfaces.

Neutron supports two networking options. The first is a very simple architecture that only lets instances attach to external (provider) networks; it does not support self-service networks, routers or floating IPs, and only administrators or otherwise authorized users can manage networks. The second option is more powerful: it supports self-service network management, user-created routers and floating IPs, so even unprivileged users can configure and manage their own networks.

Create the database and grant privileges:  mysql -uroot -ptn1Pi6Ytm

> CREATE DATABASE neutron;

> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost'    IDENTIFIED BY 'quidyOC50';

> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%'    IDENTIFIED BY 'quidyOC50';

Source the script:  source admin-openrc.sh

Create the neutron user (password: mdcGVl29i)

openstack user create --domain default --password-prompt neutron

Add the admin role to the neutron user

openstack role add --project service --user neutron admin

Create the neutron service entity

openstack service create --name neutron   --description "OpenStack Networking" network

Create the Networking service API endpoints

openstack endpoint create --region RegionOne   network public http://controller:9696

openstack endpoint create --region RegionOne   network internal http://controller:9696

openstack endpoint create --region RegionOne   network admin http://controller:9696


Adding Networking - configuration (controller)


Install the components

yum install openstack-neutron openstack-neutron-ml2 \

  openstack-neutron-linuxbridge python-neutronclient ebtables ipset -y 

Configure the server component

vim   /etc/neutron/neutron.conf  //change or add the following

[DEFAULT]

core_plugin = ml2

service_plugins = 

rpc_backend = rabbit

auth_strategy = keystone

notify_nova_on_port_status_changes = True

notify_nova_on_port_data_changes = True

nova_url = http://controller:8774/v2

verbose = True


[database]

connection = mysql://neutron:quidyOC50@controller/neutron


[oslo_messaging_rabbit]

rabbit_host = controller

rabbit_userid = openstack

rabbit_password = o3NXovnz5


[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = neutron

password = mdcGVl29i


[nova]

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

region_name = RegionOne

project_name = service

username = nova

password = hsSNsqc43

[oslo_concurrency]

lock_path = /var/lib/neutron/tmp

Configure the ML2 plug-in

vim  /etc/neutron/plugins/ml2/ml2_conf.ini  //change or add the following

[ml2]

type_drivers = flat,vlan

tenant_network_types = 

mechanism_drivers = linuxbridge

extension_drivers = port_security


[ml2_type_flat]

flat_networks = public

[securitygroup]

enable_ipset = True


Configure the Linux bridge agent (see the note about the physical interface name after this snippet)

vim  /etc/neutron/plugins/ml2/linuxbridge_agent.ini  //change or add the following

[linux_bridge]

physical_interface_mappings = public:eno16777736

[vxlan]

enable_vxlan = False

[agent]

prevent_arp_spoofing = True

[securitygroup]

enable_security_group = True

firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
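
Note: public:eno16777736 maps the public provider network to the host's physical network interface; the interface name will likely differ on your machine, so check yours first, for example:

ip addr show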


Configure the DHCP agent

vim  /etc/neutron/dhcp_agent.ini  //change or add the following

[DEFAULT]

interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver

dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq

enable_isolated_metadata = True

verbose = True


Adding Networking - configure the metadata agent (controller)


Edit the configuration file

vim  /etc/neutron/metadata_agent.ini  //change or add the following

[DEFAULT]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_region = RegionOne

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = neutron

password = mdcGVl29i

nova_metadata_ip = controller

metadata_proxy_shared_secret = m8uhmQTu2

verbose = True

Note: delete the pre-existing auth_url, auth_region, admin_tenant_name, admin_user and admin_password lines from this configuration file; they are replaced by the settings above.
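
metadata_proxy_shared_secret is an arbitrary shared secret; it only has to match the value configured in nova.conf below. If you prefer to generate your own instead of m8uhmQTu2, one option is:

openssl rand -hex 10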


Adding Networking - configure Compute to use Networking (controller)


vim /etc/nova/nova.conf  //change or add the following

[neutron]

url = http://controller:9696

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

region_name = RegionOne

project_name = service

username = neutron

password = mdcGVl29i


service_metadata_proxy = True

metadata_proxy_shared_secret = m8uhmQTu2


Adding Networking - start the services (controller)


Create a symbolic link to the ML2 plug-in configuration file

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

Populate the database

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \

 --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

Restart the Compute API service

systemctl restart openstack-nova-api.service

Start the services

systemctl enable neutron-server.service \

 neutron-linuxbridge-agent.service neutron-dhcp-agent.service \

 neutron-metadata-agent.service

systemctl start neutron-server.service \

 neutron-linuxbridge-agent.service neutron-dhcp-agent.service \

 neutron-metadata-agent.service

systemctl enable neutron-l3-agent.service

systemctl start neutron-l3-agent.service


Adding Networking - configure the compute node (compute)


Install the components:  yum install -y openstack-neutron openstack-neutron-linuxbridge ebtables ipset

Configure the common component:  vim /etc/neutron/neutron.conf //change or add the following

[DEFAULT]

rpc_backend = rabbit

auth_strategy = keystone

verbose = True

[oslo_messaging_rabbit]

rabbit_host = controller

rabbit_userid = openstack

rabbit_password = o3NXovnz5

[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = neutron

password = mdcGVl29i

[oslo_concurrency]

lock_path = /var/lib/neutron/tmp


Configure the Linux bridge agent

vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[linux_bridge]

physical_interface_mappings = public:eno16777736


[vxlan]

enable_vxlan = False


[agent]

prevent_arp_spoofing = True


[securitygroup]

enable_security_group = True

firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver


Configure Compute to use Networking

vim /etc/nova/nova.conf  //change or add the following

[neutron]

url = http://controller:9696

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

region_name = RegionOne

project_name = service

username = neutron

password = mdcGVl29i

Start the services

systemctl restart openstack-nova-compute.service   

systemctl enable neutron-linuxbridge-agent.service

systemctl start neutron-linuxbridge-agent.service


Adding Networking - verify configuration (controller)


Source the environment variable script

source admin-openrc.sh


List the loaded extensions

neutron ext-list


List all agents

neutron agent-list


The agent types should be as follows:

Linux bridge agent

Linux bridge agent

DHCP agent

Metadata agent

There must be four agents in total; otherwise, one of the steps above is misconfigured.
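
A quick way to count the agent rows in the output (it should print 4):

neutron agent-list | grep -cE 'Linux bridge agent|DHCP agent|Metadata agent'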