Deploying OpenStack Pike on CentOS 7

Controller node: 192.168.2.243    Compute node: 192.168.2.244
Deployment
	Base environment
		yum -y install centos-release-openstack-pike
		yum -y remove mariadb-libs
		systemctl stop firewalld
		systemctl disable firewalld
		setenforce 0
		sed -i '/^SELINUX=/cSELINUX=disabled' /etc/selinux/config
		yum -y install ntpdate
		ntpdate time1.aliyun.com
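		ntpdate only syncs the clock once; a minimal sketch to keep it in sync (assuming crond is running; chrony would also work):
			echo '*/30 * * * * /usr/sbin/ntpdate time1.aliyun.com >/dev/null 2>&1' >> /var/spool/cron/root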
		yum -y install openstack-selinux					sets SELinux contexts for OpenStack in case SELinux was left enabled
		yum -y install python-openstackclient					install the OpenStack command-line client
		yum -y install mariadb mariadb-server python2-PyMySQL			install the database and the Python MySQL driver
		vim /usr/lib/systemd/system/mariadb.service				with a yum-installed MariaDB this limit must be raised, or max_connections cannot be increased
			[Service]
			LimitNOFILE = 65535
		systemctl  daemon-reload
		vim /etc/my.cnf.d/openstack.cnf	
			[mysqld]
			bind-address = 192.168.2.243
			default-storage-engine = innodb
			innodb_file_per_table = on
			max_connections = 4096
			collation-server = utf8_general_ci
			character-set-server = utf8
		systemctl enable mariadb
		systemctl start mariadb
		mysql_secure_installation						set the root password and run the security initialization
		yum -y install rabbitmq-server					message queue for communication between OpenStack services (AMQP on 5672; the management UI, if enabled, is 15672)
		systemctl enable rabbitmq-server.service
		systemctl start rabbitmq-server.service
		rabbitmqctl add_user openstack openstack				a user is needed to connect to the queue; the password is also set to openstack
		rabbitmqctl set_permissions openstack ".*" ".*" ".*"			grant the openstack user full configure/write/read permissions
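		The user and its permissions can be double-checked with the standard rabbitmqctl queries:
			rabbitmqctl list_users
			rabbitmqctl list_permissions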
		yum -y install memcached python-memcached		install the caching service
		vim /etc/sysconfig/memcached
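		The edit is not shown above; per the install guide the usual change is to have memcached listen on the management IP as well, so other nodes can reach it:
			OPTIONS="-l 127.0.0.1,::1,192.168.2.243"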
		systemctl enable memcached.service
		systemctl start memcached.service
	Identity service (Keystone) deployment   (mysql 3306, memcached 11211, rabbitmq 5672, keystone 5000 and 35357)
		echo "create database keystone;" |mysql -p'123456'			create the database and a granted account
		echo "grant all on keystone.* to 'keystone'@'localhost' identified by 'keystone';" |mysql -p'123456'
		echo "grant all on keystone.* to 'keystone'@'%' identified by 'keystone';" |mysql -p'123456'
		yum -y install openstack-keystone httpd mod_wsgi		install Keystone; it runs under httpd, which needs mod_wsgi to talk to Python
		vim /etc/keystone/keystone.conf
			[database]
			connection = mysql+pymysql://keystone:keystone@192.168.2.243/keystone		database connection	640
			[token]
			provider = fernet
		su -s /bin/sh -c "keystone-manage db_sync" keystone		populate the Identity service database; running it as the keystone user keeps the generated logs owned by keystone
		Verify: mysql -h 192.168.2.243 -ukeystone -p'keystone' -e "use keystone;show tables;"		check that the tables were created
		keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone		initialize the Fernet key repositories
		keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
		keystone-manage bootstrap --bootstrap-password admin --bootstrap-admin-url http://192.168.2.243:35357/v3/ --bootstrap-internal-url http://192.168.2.243:35357/v3/ --bootstrap-public-url http://192.168.2.243:5000/v3/ --bootstrap-region-id RegionOne		bootstrap the initial admin user and the three endpoint URLs (stored in the database)
		vim /etc/httpd/conf/httpd.conf
			ServerName 192.168.2.243:80
		ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/		httpd serves Keystone, so link Keystone's WSGI config into httpd's conf.d
		systemctl enable httpd.service					
		systemctl start httpd.service						no separate keystone service to start; httpd fronts Keystone, so starting httpd starts Keystone
		Check with netstat -tnlp  (mysql 3306, memcached 11211, rabbitmq 5672, keystone 5000 and 35357)
		vim admin-openstack		script that sets the environment variables
			export OS_USERNAME=admin		login user    (lets the openstack CLI run as admin by default, without a pile of arguments)
			export OS_PASSWORD=admin		password
			export OS_PROJECT_NAME=admin		project
			export OS_USER_DOMAIN_NAME=default	default domain
			export OS_PROJECT_DOMAIN_NAME=default
			export OS_AUTH_URL=http://192.168.2.243:35357/v3	auth URL
			export OS_IDENTITY_API_VERSION=3			API version
			export OS_IMAGE_API_VERSION=2
		vim demo-openstack
			export OS_USERNAME=demo
			export OS_PASSWORD=demo
			export OS_PROJECT_NAME=demo
			export OS_USER_DOMAIN_NAME=default
			export OS_PROJECT_DOMAIN_NAME=default
			export OS_AUTH_URL=http://192.168.2.243:5000/v3
			export OS_IDENTITY_API_VERSION=3
			export OS_IMAGE_API_VERSION=2
		source admin-openstack		(unset OS_AUTH_URL OS_PASSWORD   clears the variables)
		openstack project create --domain default   --description "Service Project" service		create a project; the components configured next need service users, and every user must belong to a project
		openstack project create --domain default   --description "Demo Project" demo			create a demo project for demonstration
		openstack user create --domain default   --password-prompt demo				create the demo user; you are prompted for a password (demo)
		openstack role create user								create a role; there are usually two (admin and this ordinary user role), i.e. permission levels
		openstack role add --project demo --user demo user					add the demo user to the demo project with the user role
		Verify Keystone
			unset OS_AUTH_URL OS_PASSWORD		clear the admin environment variables used by the openstack CLI
			openstack --os-auth-url http://192.168.2.243:35357/v3 --os-project-domain-name default --os-user-domain-name default --os-project-name admin --os-username admin token issue  	check that the admin user can obtain a token (prompts for the admin password)
			source admin-openstack
			openstack token issue
	Image service (Glance) deployment		(api 9292, registry 9191)
		echo "create database glance;" |mysql -p'123456'
		echo "grant all on glance.* to 'glance'@'localhost' identified by 'glance';" |mysql -p'123456'
		echo "grant all on glance.* to 'glance'@'%' identified by 'glance';" |mysql -p'123456'	
		source admin-openstack
		openstack user create --domain default   --password-prompt glance			enter a password (glance)
		openstack role add --project service --user glance admin
		yum -y install openstack-glance
		vim /etc/glance/glance-api.conf				the API service needs to reach the database
			[database]
			connection = mysql+pymysql://glance:glance@192.168.2.243/glance  	1748
			[keystone_authtoken]			press o and add the lines below	3179
			auth_uri = http://192.168.2.243:5000		Keystone address
			auth_url = http://192.168.2.243:35357
			memcached_servers = 192.168.2.243:11211
			auth_type = password
			project_domain_name = default
			user_domain_name = default
			project_name = service
			username = glance
			password = glance
			[paste_deploy]
			flavor = keystone		enable Keystone authentication	3990
			[glance_store]
			stores = file,http					1865
			default_store = file					1896
			filesystem_store_datadir = /var/lib/glance/images/	2197
		vim /etc/glance/glance-registry.conf				the registry service needs to reach the database
			[database]
			connection = mysql+pymysql://glance:glance@192.168.2.243/glance 	1038
			[keystone_authtoken]
			auth_uri = http://192.168.2.243:5000		Keystone address
			auth_url = http://192.168.2.243:35357
			memcached_servers = 192.168.2.243:11211
			auth_type = password
			project_domain_name = default
			user_domain_name = default
			project_name = service
			username = glance
			password = glance
			[paste_deploy]
			flavor = keystone		enable Keystone authentication	3990
		su -s /bin/sh -c "glance-manage db_sync" glance			sync the database
		Verify: mysql -h 192.168.2.243 -uglance -p'glance' -e "use glance;show tables;"
		systemctl enable openstack-glance-api.service   openstack-glance-registry.service
		systemctl start openstack-glance-api.service   openstack-glance-registry.service
		openstack service create --name glance   --description "OpenStack Image" image
		openstack endpoint create --region RegionOne   image public http://192.168.2.243:9292	create the service endpoints; the service is reached through them
		openstack endpoint create --region RegionOne   image internal http://192.168.2.243:9292
		openstack endpoint create --region RegionOne   image admin http://192.168.2.243:9292
		Verify
			wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
			source admin-openstack
			openstack image create "cirros"   --file cirros-0.3.4-x86_64-disk.img   --disk-format qcow2 --container-format bare   --public           	上传镜像保存在/var/lib/glance/images/
			openstack image list	
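			A quick check on the backing store; the file on disk is named after the image UUID:
				ls /var/lib/glance/images/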
	Compute service (Nova) deployment on the controller node
		controller node node1
		echo "create database nova;" |mysql -p'123456'
		echo "grant all on nova.* to 'nova'@'localhost' identified by 'nova';" |mysql -p'123456'
		echo "grant all on nova.* to 'nova'@'%' identified by 'nova';" |mysql -p'123456'
		echo "create database nova_api;" |mysql -p'123456'
		echo "grant all on nova_api.* to 'nova'@'localhost' identified by 'nova';" |mysql -p'123456'
		echo "grant all on nova_api.* to 'nova'@'%' identified by 'nova';" |mysql -p'123456'
		echo "create database nova_cell0;" |mysql -p'123456'
		echo "grant all on nova_cell0.* to 'nova'@'localhost' identified by 'nova';" |mysql -p'123456'
		echo "grant all on nova_cell0.* to 'nova'@'%' identified by 'nova';" |mysql -p'123456'
		source admin-openstack
		openstack user create --domain default   --password-prompt nova		enter a password (nova)
		openstack role add --project service --user nova admin
		openstack service create --name nova   --description "OpenStack Compute" compute					create the nova service entity
		openstack endpoint create --region RegionOne   compute public http://192.168.2.243:8774/v2.1		create the nova endpoints
		openstack endpoint create --region RegionOne   compute internal http://192.168.2.243:8774/v2.1
		openstack endpoint create --region RegionOne   compute admin http://192.168.2.243:8774/v2.1
		openstack user create --domain default --password-prompt placement
		openstack role add --project service --user placement admin
		openstack service create --name placement --description "Placement API" placement
		openstack endpoint create --region RegionOne placement public http://192.168.2.243:8778
		openstack endpoint create --region RegionOne placement internal http://192.168.2.243:8778
		openstack endpoint create --region RegionOne placement admin http://192.168.2.243:8778
		yum -y install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api
		vim /etc/nova/nova.conf
			[DEFAULT]
			use_neutron=True							use the Neutron networking component		2062
			firewall_driver = nova.virt.firewall.NoopFirewallDriver			disable Nova's own firewall, since Neutron's is used	3266
			[DEFAULT]
			enabled_apis=osapi_compute,metadata					which APIs to enable	3052
			[DEFAULT]
			auth_strategy=keystone						14
			[api_database]
			connection=mysql+pymysql://nova:nova@192.168.2.243/nova_api		Nova's two database connections	3661
			[database]
			connection=mysql+pymysql://nova:nova@192.168.2.243/nova		4678
			[glance]
			api_servers=http://192.168.2.243:9292				how Nova reaches the Glance API	4815
			[keystone_authtoken]			Nova-to-Keystone auth settings; add the lines below	5431
			auth_uri = http://192.168.2.243:5000
			auth_url = http://192.168.2.243:35357
			memcached_servers = 192.168.2.243:11211
			auth_type = password
			project_domain_name = default
			user_domain_name = default
			project_name = service
			username = nova
			password = nova
			[oslo_concurrency]
			lock_path=/var/lib/nova/tmp						lock path: where lock files are created	6708
			[DEFAULT]
			transport_url=rabbit://openstack:openstack@192.168.2.243		message queue connection	2
			[vnc]
			enabled=true
			vncserver_listen=0.0.0.0						VNC server listens on all local addresses	8387
			vncserver_proxyclient_address=192.168.2.243				address the proxy uses to reach the VNC server	8399
			[placement]
			os_region_name = RegionOne
			project_domain_name = Default
			project_name = service
			auth_type = password
			user_domain_name = Default
			auth_url = http://192.168.2.243:35357/v3
			username = placement
			password = placement
		vim /etc/httpd/conf.d/00-nova-placement-api.conf
			<Directory /usr/bin>			add inside the VirtualHost for port 8778
			   <IfVersion >= 2.4>
			      Require all granted
			   </IfVersion>
			   <IfVersion < 2.4>
			      Order allow,deny
			      Allow from all
			   </IfVersion>
			</Directory>
		systemctl restart httpd
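		A quick check that the Placement API now responds (it should return a small JSON version document):
			curl http://192.168.2.243:8778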
		if the next command gives trouble, see http://blog.sina.com.cn/s/blog_86aa5b070102w8sx.html
		then log in to MySQL and inspect the tables in the nova databases
		su -s /bin/sh -c "nova-manage api_db sync" nova			populate the nova-api database
		su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
		su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova	有输出
		su -s /bin/sh -c "nova-manage db sync" nova
		Verify:	nova-manage cell_v2 list_cells
				| cell0 | 00000000-0000-0000-0000-000000000000 |                 none:/                | mysql+pymysql://nova:****@192.168.2.243/nova_cell0 |
				| cell1 | 816e29eb-7cbc-4db7-9400-f1577ac47e1e | rabbit://openstack:****@192.168.2.243 |    mysql+pymysql://nova:****@192.168.2.243/nova    |
		systemctl enable openstack-nova-api.service   openstack-nova-consoleauth.service openstack-nova-scheduler.service   openstack-nova-conductor.service openstack-nova-novncproxy.service
		systemctl start openstack-nova-api.service   openstack-nova-consoleauth.service openstack-nova-scheduler.service   openstack-nova-conductor.service openstack-nova-novncproxy.service
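		nova-status (shipped with Nova since Ocata) can sanity-check the cells and Placement setup:
			nova-status upgrade check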
	Compute node Nova-Compute installation	(can be a separate machine or co-located; it does the actual compute work, creating VMs with KVM) (hostnames must differ across compute nodes)
		yum -y install openstack-nova-compute		
		scp -r /etc/nova/nova.conf 192.168.2.244:/root/nova/
		vim /etc/nova/nova.conf	when reusing nova.conf on this node, the differences from the controller's version are:
			connection=mysql+pymysql://nova:nova@192.168.2.243/nova_api			delete
			connection=mysql+pymysql://nova:nova@192.168.2.243/nova			delete
			[vnc]
			vncserver_proxyclient_address=192.168.2.244			the compute node's IP	8400
			vncserver_listen=192.168.2.244					the compute node's IP
			novncproxy_base_url=http://192.168.2.243:6080/vnc_auto.html	the controller node's IP	8419
		egrep -c '(vmx|svm)' /proc/cpuinfo		the result must not be zero (checks whether the machine supports hardware virtualization)
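		If the result is zero, the install guide's fallback is software emulation; set this in the compute node's nova.conf:
			[libvirt]
			virt_type = qemu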
		chown root.nova /etc/nova/nova.conf
		systemctl enable libvirtd.service openstack-nova-compute.service
		systemctl start libvirtd.service openstack-nova-compute.service
		Verify on the controller node
			openstack compute service list		
				 6  | nova-compute     | bogon | nova     | enabled | up    | 2019-07-28T09:00:59.000000
		su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
	Adding more compute nodes
		yum -y install openstack-nova-compute
		copy nova.conf from an existing compute node to the new node
		then change in the config file:
			vncserver_proxyclient_address=192.168.2.244			the new compute node's IP	8400
			vncserver_listen=192.168.2.244					the new compute node's IP
		mind the file ownership after copying (root:nova)
		systemctl enable libvirtd.service openstack-nova-compute.service
		systemctl start libvirtd.service openstack-nova-compute.service
		Verify on the controller node
			source admin-openstack
			openstack compute service list
		su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
		then configure Neutron
	Networking service (Neutron) deployment on the controller node
		echo "create database neutron;" |mysql -p'123456'
		echo "grant all on neutron.* to 'neutron'@'localhost' identified by 'neutron';" |mysql -p'123456'
		echo "grant all on neutron.* to 'neutron'@'%' identified by 'neutron';" |mysql -p'123456'
		openstack user create --domain default   --password-prompt neutron
		openstack role add --project service --user neutron admin
		openstack service create --name neutron   --description "OpenStack Networking" network
		openstack endpoint create --region RegionOne network public http://192.168.2.243:9696
		openstack endpoint create --region RegionOne network internal http://192.168.2.243:9696
		openstack endpoint create --region RegionOne network admin http://192.168.2.243:9696
		yum -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
		vim /etc/neutron/neutron.conf
			[database]
			connection = mysql+pymysql://neutron:neutron@192.168.2.243/neutron			
			[DEFAULT]
			auth_strategy = keystone		
			core_plugin = ml2				
			service_plugins =			
			[keystone_authtoken]	
			auth_uri = http://192.168.2.243:5000
			auth_url = http://192.168.2.243:35357
			memcached_servers = 192.168.2.243:11211
			auth_type = password
			project_domain_name = default
			user_domain_name = default
			project_name = service
			username = neutron
			password = neutron
			[DEFAULT]
			transport_url=rabbit://openstack:openstack@192.168.2.243	
			notify_nova_on_port_status_changes = True		
			notify_nova_on_port_data_changes = True		
			[nova]			
			auth_url = http://192.168.2.243:35357
			auth_type = password
			project_domain_name = default
			user_domain_name = default
			region_name = RegionOne
			project_name = service
			username = nova
			password = nova
			[oslo_concurrency]
			lock_path = /var/lib/neutron/tmp			
		vim /etc/neutron/plugins/ml2/ml2_conf.ini
			[ml2]
			type_drivers = flat,vlan			
			tenant_network_types =		
			mechanism_drivers = linuxbridge			
			extension_drivers = port_security			
			[ml2_type_flat]
			flat_networks = provider			
			[securitygroup]
			enable_ipset = true				
		vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
			[linux_bridge]
			physical_interface_mappings = provider:em1	
			[vxlan]
			enable_vxlan = false					
			[securitygroup]
			firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver	
			enable_security_group = true				
		vim /etc/neutron/dhcp_agent.ini
			[DEFAULT]
			interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver	
			dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq		
			enable_isolated_metadata = true			
		vim /etc/neutron/metadata_agent.ini
			[DEFAULT]
			nova_metadata_ip = 192.168.2.243			
			metadata_proxy_shared_secret = openstack		
		vim /etc/nova/nova.conf
			[neutron]			
			url = http://192.168.2.243:9696
			auth_url = http://192.168.2.243:35357
			auth_type = password
			project_domain_name = Default
			user_domain_name = Default
			region_name = RegionOne
			project_name = service
			username = neutron
			password = neutron
			service_metadata_proxy = True
			metadata_proxy_shared_secret = openstack		
		ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
		su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
		systemctl restart openstack-nova-api.service
		systemctl enable neutron-server.service   neutron-linuxbridge-agent.service neutron-dhcp-agent.service   neutron-metadata-agent.service
		systemctl start neutron-server.service   neutron-linuxbridge-agent.service neutron-dhcp-agent.service   neutron-metadata-agent.service
		Verify
			openstack network agent list			three agents up
	Compute node Neutron deployment
		yum -y install openstack-neutron-linuxbridge ebtables ipset
		vim /etc/neutron/neutron.conf
			[DEFAULT]
			auth_strategy = keystone
			[keystone_authtoken]
			auth_uri = http://192.168.2.243:5000
			auth_url = http://192.168.2.243:35357
			memcached_servers = 192.168.2.243:11211
			auth_type = password
			project_domain_name = default
			user_domain_name = default
			project_name = service
			username = neutron
			password = neutron
			[DEFAULT]
			transport_url=rabbit://openstack:openstack@192.168.2.243
			[oslo_concurrency]
			lock_path = /var/lib/neutron/tmp
		scp -r /etc/neutron/plugins/ml2/linuxbridge_agent.ini 192.168.2.244:/etc/neutron/plugins/ml2/
		vim /etc/nova/nova.conf
			[neutron]
			url = http://192.168.2.243:9696
			auth_url = http://192.168.2.243:35357
			auth_type = password
			project_domain_name = default
			user_domain_name = default
			region_name = RegionOne
			project_name = service
			username = neutron
			password = neutron
		systemctl restart openstack-nova-compute.service
		systemctl enable neutron-linuxbridge-agent.service
		systemctl start neutron-linuxbridge-agent.service
		Verify (on the controller node)
			openstack network agent list	four agents up
	Verify by creating an instance	(controller node)
		source admin-openstack
		openstack network create  --share --external   --provider-physical-network provider   --provider-network-type flat provider	create the network
		openstack subnet create --network provider   --allocation-pool start=192.168.2.150,end=192.168.2.253   --dns-nameserver 114.114.114.114 --gateway 192.168.2.1   --subnet-range 192.168.2.0/24 provider		create the subnet
		Verify
			openstack network list
			openstack subnet list
		openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano		create a flavor: vCPUs, RAM in MB, disk in GB, name
		source demo-openstack
		ssh-keygen -q -N ""
		openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey			upload the public key
		openstack security group rule create --proto icmp default
		openstack security group rule create --proto tcp --dst-port 22 default
		openstack network list			get the network ID; the next command needs it
		openstack server create --flavor m1.nano --image cirros   --nic net-id=a069989b-d13e-47dc-a00e-a7562041e28c --security-group default   --key-name mykey provider-instance    flavor, image, network ID, security group, keypair, instance name
		if a class 'sqlalchemy.exc.OperationalError' error appears, restart nova-api and rerun the create command (systemctl restart openstack-nova-api.service)
		Verify
			openstack server list
			| ID                                   | Name              | Status | Networks               | Image  | Flavor  |
			| 24ecffdc-9948-433e-b7b0-e1327fd160f4 | provider-instance | ACTIVE | provider=192.168.2.166 | cirros | m1.nano 
			openstack console url show provider-instance			get the console URL; takes the instance name
			instances live under /var/lib/nova/instances/ on the compute node, named by instance ID
			Why the injected key works
				the controller runs an HTTP metadata service inside a network namespace that serves the key; the instance fetches that file and appends it to its own authorized_keys (custom images need a script for this, as in the custom-image section below)
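			From inside an instance this boils down to one HTTP request against the metadata service (the same call the custom-image script below makes):
				curl http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key >> /root/.ssh/authorized_keys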
	Dashboard (Horizon) deployment
		yum install openstack-dashboard
		vim /etc/openstack-dashboard/local_settings
			ALLOWED_HOSTS = ['*', ]
			OPENSTACK_API_VERSIONS = {					pin the service API versions:
			    "identity": 3,
			    "image": 2,
			    "volume": 2,
			    "compute": 2,
			}
			OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
			OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
			OPENSTACK_HOST = "192.168.2.243"
			OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST		keystone
			OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
			TIME_ZONE = "Asia/Shanghai"
			OPENSTACK_NEUTRON_NETWORK = {					disable support for layer-3 networking services (flat provider network only):
			    ...
			    'enable_router': False,
			    'enable_quotas': False,
			    'enable_ipv6': False,
			    'enable_fip_topology_check': False,
			}
		systemctl restart httpd
		Test
			http://192.168.2.243/dashboard/			domain Default, account admin, password admin
	Custom image (on a machine with KVM installed)
		qemu-img create -f qcow2 /tmp/centos.qcow2 10G		create a 10G image named centos.qcow2
		virsh net-list	first confirm the default bridged network exists (start it with virsh net-start default if missing; compute nodes may not have a default network, so a separate KVM machine may be needed to build the image)
		virt-install --name centos7u6 --ram 2048 --vcpus=1 --disk /tmp/centos.qcow2,format=qcow2 --network network=brqa069989b-d1 --graphics vnc,listen=0.0.0.0 --noautoconsole --os-type=linux --os-variant=centos7.0 --location=/samba/CentOS-7-x86_64-Minimal-1810.iso
		install and customize the guest however you like
		vim /etc/sysconfig/network-scripts/ifcfg-eth0	delete the following lines
			UUID				
			PEERDNS
			peerroutes
			HWADDR=
		systemctl restart network
		vim /tmp/metadata.sh
			#!/bin/bash
			set_key () {			pull the key from the metadata service for passwordless SSH login
				if [ ! -d /root/.ssh ]; then
					mkdir -p /root/.ssh
					chmod 700 /root/.ssh
				fi
				ATTEMPTS=30
				FAILED=0
				for i in {1..5}
				do
					if [ ! -f /root/.ssh/authorized_keys ];then
						curl -f http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key \
						> /tmp/metadata-key 2>/dev/null
						if [ $? -eq 0 ]; then
						    cat /tmp/metadata-key >> /root/.ssh/authorized_keys
						    chmod 0600 /root/.ssh/authorized_keys
						    restorecon /root/.ssh/authorized_keys
						    rm -f /tmp/metadata-key
						    echo "Successfully retrieved public key from instance metadata"
						    echo "*****************"
						    echo "AUTHORIZED KEYS"
						    echo "*****************"
						    cat /root/.ssh/authorized_keys
						    echo "*****************"
						fi
					fi
				done
			}
			set_hostname () {								set the hostname
			        PRE_HOSTNAME=$(curl http://169.254.169.254/latest/meta-data/hostname)
			        echo $PRE_HOSTNAME >/etc/hostname
			}
			set_static_ip () {								set a static IP for the instance
				PRE_IP=$(curl http://169.254.169.254/latest/meta-data/local-ipv4)
				NET_FILE="/etc/sysconfig/network-scripts/ifcfg-eth0"
				echo > $NET_FILE
				echo  "TYPE=Ethernet" >> $NET_FILZ		        
				echo  "NAME=eth0" >> $NET_FILE
				echo  "DEVICE=eth0" >> $NET_FILE
				echo  "ONBOOT=yes" >> $NET_FILE
				echo  "NETBOOT=yes" >> $NET_FILE
				echo  "BOOTPROTO=static" >> $NET_FILE
				echo  "IPADDR=$PRE_IP" >> $NET_FILE
				echo  "NETMASK=255.255.255.0" >> $NET_FILE
				echo  "GATEWAY=192.168.2.1" >> $NET_FILE
				}
			set_key 
			set_hostname
			set_static_ip
		chmod a+x /tmp/metadata.sh
		vim /etc/rc.local		run the script once on first boot, then restore the original rc.local (assumes the original was saved to /tmp/rc.local beforehand)
				/bin/bash /tmp/metadata.sh
				rm -rf /etc/rc.local
				rm -rf /etc/rc.d/rc.local
				cp -rf /tmp/rc.local /etc/rc.d/rc.local
				ln -s /etc/rc.d/rc.local /etc/rc.local
		chmod a+x /etc/rc.d/rc.local
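		Before the upload below, the qcow2 can be compressed into the file referenced there; a minimal sketch (the /samba path is simply where this walkthrough keeps images):
			qemu-img convert -c -O qcow2 /tmp/centos.qcow2 /samba/centos7.img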
		source admin-openstack
		openstack image create "centos7"   --file /samba/centos7.img   --disk-format qcow2 --container-format bare   --public
	Block Storage (Cinder) deployment on the controller node (port 8776)
		echo "create database cinder;" |mysql -p'123456'
		echo "grant all on cinder.* to 'cinder'@'localhost' identified by 'cinder';" |mysql -p'123456'
		echo "grant all on cinder.* to 'cinder'@'%' identified by 'cinder';" |mysql -p'123456'
		openstack user create --domain default   --password-prompt cinder
		openstack role add --project service --user cinder admin
		yum -y install openstack-cinder
		vim /etc/cinder/cinder.conf
			[database]
			connection = mysql+pymysql://cinder:cinder@192.168.2.243/cinder
			[DEFAULT]
			auth_strategy = keystone
			transport_url = rabbit://openstack:openstack@192.168.2.243
			[keystone_authtoken]
			auth_uri = http://192.168.2.243:5000
			auth_url = http://192.168.2.243:35357
			memcached_servers = 192.168.2.243:11211
			auth_type = password
			project_domain_name = default
			user_domain_name = default
			project_name = service
			username = cinder
			password = cinder
			[oslo_concurrency]
			lock_path = /var/lib/cinder/tmp
		su -s /bin/sh -c "cinder-manage db sync" cinder
		vim /etc/nova/nova.conf	
			[cinder]
			os_region_name = RegionOne
		systemctl restart openstack-nova-api.service
		systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
		systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service	
		openstack service create --name cinderv2   --description "OpenStack Block Storage" volumev2
		openstack service create --name cinderv3   --description "OpenStack Block Storage" volumev3
		openstack endpoint create --region RegionOne   volumev2 public http://192.168.2.243:8776/v2/%\(project_id\)s
		openstack endpoint create --region RegionOne   volumev2 internal http://192.168.2.243:8776/v2/%\(project_id\)s
		openstack endpoint create --region RegionOne   volumev2 admin http://192.168.2.243:8776/v2/%\(project_id\)s
		openstack endpoint create --region RegionOne   volumev3 public http://192.168.2.243:8776/v3/%\(project_id\)s
		openstack endpoint create --region RegionOne   volumev3 internal http://192.168.2.243:8776/v3/%\(project_id\)s
		openstack endpoint create --region RegionOne   volumev3 admin http://192.168.2.243:8776/v3/%\(project_id\)s
	Storage node Cinder deployment
		https://docs.openstack.org/cinder/pike/install/cinder-storage-install-rdo.html    
		yum -y install lvm2 device-mapper-persistent-data
		systemctl enable lvm2-lvmetad.service
		systemctl start lvm2-lvmetad.service
		pvcreate /dev/sdb
		vgcreate cinder-volumes /dev/sdb
		vim /etc/lvm/lvm.conf 
			devices {
				filter = [ "a/sdb/", "r/.*/"]
		yum -y install openstack-cinder targetcli python-keystone
		vim /etc/cinder/cinder.conf
			[database]
			connection = mysql+pymysql://cinder:cinder@192.168.2.243/cinder
			[DEFAULT]
			enabled_backends = lvm
			glance_api_servers = http://192.168.2.243:9292
			auth_strategy = keystone
			transport_url = rabbit://openstack:openstack@192.168.2.243
			[keystone_authtoken]
			auth_uri = http://192.168.2.243:5000
			auth_url = http://192.168.2.243:35357
			memcached_servers = 192.168.2.243:11211
			auth_type = password
			project_domain_name = default
			user_domain_name = default
			project_name = service
			username = cinder
			password = cinder
			[oslo_concurrency]
			lock_path = /var/lib/cinder/tmp
			[lvm]
			volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
			volume_group = cinder-volumes
			iscsi_protocol = iscsi
			iscsi_helper = lioadm
		systemctl enable openstack-cinder-volume.service target.service
		systemctl start openstack-cinder-volume.service target.service
		Verify: openstack volume service list
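		An end-to-end check: create a small volume and attach it to the test instance created earlier:
			source demo-openstack
			openstack volume create --size 1 volume1
			openstack volume list
			openstack server add volume provider-instance volume1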