################统一时间###############
date 1027100922.30
############docker的使用##########
cd images/
docker load -i nginx_latest.tar 加载镜像
mkdir /opt/xiandian
docker run -itd --name xiandian-dir -v /opt/xiandian -p 81:80 nginx:latest
docker ps -a
docker inspect +id | grep xiandian
docker inspect +id | grep volume
###########compose###############
cd compose/
ls
cp docker-compose /usr/local/bin/
chmod +x /usr/local/bin/docker-compose
mkdir ../composetest
ls ..
ls
cp app.py docker-compose.yml Dockerfile requirements.txt ../composetest/
cd
cat image.sh
cd images/
docker load -i python_3.5-alpine.tar
cd
./image.sh
cd composetest/
docker-compose up -d
############k8s平台安装##############
ls
./install.sh
kubectl get nodes
kubectl get cs
kubectl get pods -n kube-system
#############k8s平台使用#############
kubectl run nginx --image=nginx:latest --replicas=4 --image-pull-policy=Never
kubectl get pods
kubectl expose deploy/nginx --port 80
kubectl get svc
curl +id :80
ls
cd yaml/
ls
kubectl create namespace blog
kubectl create -f wordpress-db.yaml
kubectl get svc -n blog
vi wordpress.yaml 修改IP为 kubectl get svc -n blog 显示的
kubectl create -f wordpress.yaml
kubectl create -f wordpress-pod.yaml
kubectl get svc -n blog
ip:端口 (网页)
curl ip:端口
################raid#################
添加20G硬盘
yum源
cd /etc/yum.repos.d/
mount /dev/sr0 /mnt/centos/
yum clean all
yum repolist
fdisk /dev/sdb
cd
yum install -y mdadm
mdadm -Cv /dev/md0 -l 0 -n 2 /dev/sdb[1-2] 创建raid 0
mdadm -Ds 验证
mdadm -D /dev/md0 验证
mkfs.xfs /dev/md0 创建文件系统
mkdir /raid
mount /dev/md0 /raid/ 挂载
df -h 查看
umount /raid/ 删除raid的挂载
df -h
mdadm -S /dev/md0 停止md0
mdadm --zero-superblock /dev/sdb[1-2] 初始化磁盘
ls
lsblk
mdadm -Cv /dev/md5 -l 5 -n 3 /dev/sdb[1-3] --spare-devices=1 /dev/sdb4 创建raid5
mdadm -Ds 验证
mdadm -D /dev/md5
mkfs.ext4 /dev/md5
mount /dev/md5 /raid/
df -h 验证
###################kvm#####################
yum install -y qemu-kvm openssl libvirt
systemctl start libvirtd
ln -s /usr/libexec/qemu-kvm /usr/bin/qemu-kvm
ll
chmod 777 qemu-ifup-NAT
qemu-kvm -m 1024 -drive file=/root/cirros-0.3.4-x86_64-disk.img,if=virtio -net nic,model=virtio -net tap,script=/root/qemu-ifup-NAT -nographic -vnc :1
用户名+密码
ip addr list
关机 sudo poweroff
################读写分离####################
11:
hostnamectl set-hostname mycat
bash
setenforce 0
systemctl stop firewalld
vi /etc/hosts
192.168.200.11 mycat
192.168.200.12 db1
192.168.200.13 db2
cd /etc/yum.repos.d/
ls
rm -rf *
vi local.repo
[centos]
name=centos
baseurl=file:///mnt/centos
gpgcheck=0
enabled=1
[gpmall]
name=gpmall
baseurl=file:///mnt/gpmall
gpgcheck=0
enabled=1
yum clean all
yum repolist
mkdir /mnt/gpmall
cp -rvf gpmall-repo/* /mnt/gpmall/
12 13:
hostnamectl set-hostname db1 (db2)
bash
vi /etc/hosts
192.168.200.11 mycat
192.168.200.12 db1
192.168.200.13 db2
cd /etc/yum.repos.d/
ls
rm -rf *
vi local.repo
[centos]
name=centos
baseurl=ftp://192.168.200.11/centos
gpgcheck=0
enabled=1
yum clean all
yum repolist
11:
yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel
java -version
ll
tar -xzvf Mycat-server-1.6-RELEASE-20161028204710-linux.tar.gz -C /usr/local/
ll /usr/local/
chmod -R 777 /usr/local/mycat/
echo export MYCAT_HOME=/usr/local/mycat/ >> /etc/profile
source /etc/profile
ll /usr/local/mycat/conf/
rm -rf /usr/local/mycat/conf/schema.xml
vi /usr/local/mycat/conf/schema.xml
改IP地址
vi /usr/local/mycat/conf/server.xml
删除后五行 TESTDB改为 USERDB
/bin/bash /usr/local/mycat/bin/mycat start
yum install -y net-tools
netstat -ntlp
yum install -y mariadb (CentOS 的 MariaDB 客户端包名就是 mariadb,没有 mariadb-client 这个包)
mysql -h127.0.0.1 -P8066 -uroot -p123456
show databases;
12:
yum install -y mariadb mariadb-server
systemctl start mariadb
systemctl enable mariadb
mysql_secure_installation
vi /etc/my.cnf
第一行下面加:
log_bin=mysql-bin
binlog_ignore_db=mysql
server_id=12 (IP地址最后一位)
systemctl restart mariadb
mysql -uroot -p123456
grant all privileges on *.* to root@'%' identified by '123456';
grant replication slave on *.* to 'user'@'db2' identified by '123456';
flush privileges; (刷新)
13:
yum install -y mariadb mariadb-server
systemctl start mariadb
systemctl enable mariadb
mysql_secure_installation
vi /etc/my.cnf
第一行下面加:
log_bin=mysql-bin
binlog_ignore_db=mysql
server_id=13 (IP地址最后一位)
systemctl restart mariadb
mysql -uroot -p123456
change master to master_host='db1',master_user='user',master_password='123456';
start slave;
show slave status\G
schema内容
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
<schema name="USERDB" checkSQLschema="true" sqlMaxLimit="100" dataNode="dn1"></schema>
<dataNode name="dn1" dataHost="localhost1" database="test" />
<dataHost name="localhost1" maxCon="1000" minCon="10" balance="3" dbType="mysql" dbDriver="native" writeType="0" switchType="1" slaveThreshold="100">
<heartbeat>select user()</heartbeat>
<writeHost host="hostM1" url="192.168.200.12:3306" user="root" password="123456">
<readHost host="hostS1" url="192.168.200.13:3306" user="root" password="123456" />
</writeHost>
</dataHost>
</mycat:schema>
###################zookeeper######################
yum源(gpmall)
hostnamectl set-hostname zookeeper (1 2 3)
vi /etc/hosts IP地址映射
scp /etc/hosts 192.168.200.12:/etc/hosts
scp /etc/hosts 192.168.200.13:/etc/hosts
yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel
java -version
ll
scp zookeeper-3.4.14.tar.gz 192.168.200.12:/root
scp zookeeper-3.4.14.tar.gz 192.168.200.13:/root
ll
tar -xvzf zookeeper-3.4.14.tar.gz
ls
cd zookeeper-3.4.14
ls
cd conf/
ls
cp zoo_sample.cfg zoo.cfg
vi zoo.cfg
server.1=192.168.200.11:2888:3888
server.2=192.168.200.12:2888:3888
server.3=192.168.200.13:2888:3888
cd
先解压12 13节点zookeeper且复制zoo_sample.cfg成zoo.cfg
scp zookeeper-3.4.14/conf/zoo.cfg 192.168.200.12:/root/zookeeper-3.4.14/conf/zoo.cfg
scp zookeeper-3.4.14/conf/zoo.cfg 192.168.200.13:/root/zookeeper-3.4.14/conf/zoo.cfg
mkdir /tmp/zookeeper
vi /tmp/zookeeper/myid
(11 12 13 里内容分别为 1 2 3)
cd
ls
cd zookeeper-3.4.14
cd bin/
ls
./zkServer.sh start (三个节点同时开启)(./zkserver.sh stop 关闭)
./zkServer.sh status
#####################kafka########################
11:
scp kafka_2.11-1.1.1.tgz 192.168.200.12:/root
scp kafka_2.11-1.1.1.tgz 192.168.200.13:/root
tar -xvzf kafka_2.11-1.1.1.tgz
cd kafka_2.11-1.1.1/config/
ls
vi server.properties
在这两行前加#
broker.id=0
zookeeper.connect=localhost:2181
再加三行
broker.id=1(每个节点的数字不同)
zookeeper.connect=192.168.200.11:2181,192.168.200.12:2181,192.168.200.13:2181
listeners = PLAINTEXT://192.168.200.11(每个节点的数字不同):9092
先把12 13节点的的kafka解压了
scp server.properties 192.168.200.12:/root/kafka_2.11-1.1.1/config/server.properties
scp server.properties 192.168.200.13:/root/kafka_2.11-1.1.1/config/server.properties
cd ../bin/
./kafka-server-start.sh -daemon ../config/server.properties
jps
./kafka-topics.sh --create --zookeeper 192.168.200.11:2181 --replication-factor 1 --partitions 1 --topic test
################故障排查################
systemctl start httpd
openstack-service status (如果出现0 systemctl start +那个id)
source /etc/keystone/admin-openrc.sh
#################keystone#################
1 用户:
openstack user create --domain xiandian --password 123456 --email alice@example.com alice
openstack user list
openstack user show (+用户id)
2 项目:
openstack project create --domain xiandian acme
openstack project list
openstack project show (+项目id)
3 角色:
openstack role create compute-user
openstack role list
openstack role show (+角色id)
4 绑定:
openstack role add --user alice --project acme compute-user
5 查询:
openstack endpoint list
新增:
openstack user set --password 000000 yk (修改密码)
################glance管理###############
glance image-create --name cirros --disk-format qcow2 --container-format bare --progress < cirros-0.3.4-x86_64-disk.img (文件名)
glance image-list
glance image-show(镜像id)
glance image-update --min-ram 1024 +id (镜像id)
glance image-delete(镜像id)(delete删除镜像)
glance image-update --min-ram 1024 +id
glance image-update --min-disk 2 +id
################nova计算服务##############
nova secgroup-create test 'test the nova command about the rules'
nova flavor-list
nova boot --flavor m1.tiny --image (+glance image-list那个id) testcom
nova flavor-create test 6 2048 20 2 (名字 id 内存 磁盘 CPU核数)
nova flavor-show test
##################neutron####################
网页登陆:192.168.200.10/dashboard
自行排查服务状态
外部网络:
名称: ext-net
子网名称: ext-subnet
浮动网段: 192.168.200.100,192.168.200.200
网关: 192.168.200.1
内部网络:
名称:int-net1
子网名称:int-subnet1
浮动网段:10.0.0.100,10.0.0.200
网关:10.0.0.1
添加路由器:
名称:ext-router
添加网关
内部网络和外部网络连通
执行 neutron router-show ext-router
neutron agent-list -c binary
openstack network create --share sharenet1
neutron net-show sharenet1
neutron net-list
neutron agent-list
neutron agent-list -c binary -c id
neutron agent-show +(neutron agent-list -c binary -c id 里的id)
#################cinder#################
source /etc/keystone/admin-openrc.sh
cinder create --name extend-demo 2
cinder list
cinder type-create lvm
cinder type-list
cinder create --name type_test_demo --volume-type lvm 2
cinder extend extend-demo 3 (扩容到3G)
cinder show (硬盘id)
###############存储管理###################
openstack container create examtest
openstack container list
touch aaa.txt
openstack object create examtest aaa.txt
openstack object list examtest
openstack object show examtest aaa.txt
################Ansible###################
ansible --version
cd /etc/ansible
ls
vi ansible.cfg (去掉host_key_checking那行前面的#,即取消注释使其生效)
vi hosts
192.168.200.12或db1
192.168.200.13或db2
ansible all -m ping -k
ansible all -m copy -a 'src=/etc/hosts dest=/root/' -k
#################swift################
1.创建
swift post test (创建一个名为test的容器)
swift list test (查询test容器内的内容)
2.上传
mkdir file touch 1 2 3 (先创建几个目录和普通文件)
swift upload test file/ (先上传空白目录)
swift upload test/file 1 (上传1文件到test容器内的file目录)
mv 2 3 file/ (另一种上传方式)
swift upload test file/ (再上传file目录)
3.下载
swift download test file/3
4.删除容器内文件
swift delete test file/3
5.查看容器服务状态
swift stat
swift capabilities | grep max_file_size
##################lvm###################
setenforce 0
systemctl stop firewalld
lsblk
fdisk /dev/sdb 硬盘分区 n 默认 +5G(需要几个分区加几个)
p w(保存退出)
lsblk 查看
pvcreate /dev/sdb1 /dev/sdb2 创建物理卷
pvs
pvdisplay
vgcreate -s 16m xcloudvg /dev/sdb[1-2] 指定物理区域16M,创建卷组
vgdisplay
lsblk
vgextend xcloudvg /dev/sdb3 扩展卷组
vgs
lvcreate -L 6G -n mylv xcloudvg 创建逻辑卷,大小指定为6G (lvcreate 的 -L 不带 +,带 + 仅用于 lvextend)
mkfs.ext4 /dev/mapper/xcloudvg-mylv 使用文件系统格式化逻辑卷
mount /dev/mapper/xcloudvg-mylv /mnt 挂载
df -lh
lvextend -L +2G /dev/mapper/xcloudvg-mylv 扩展逻辑卷
df -lh
resize2fs /dev/mapper/xcloudvg-mylv 更新文件系统
df -lh
检验:
vgdisplay
#################smb##################
setenforce 0
systemctl stop firewalld
yum install -y samba
vi /etc/samba/smb.conf
[global]下添加:
disable spoolss=yes
最后一行添加:
[share]
path=/opt/share
browseable=yes
public=yes
writable=yes
mkdir /opt/share
chmod 777 /opt/share
systemctl start smb nmb
smbpasswd -a root
systemctl restart smb nmb
验证:
yum install -y net-tools
netstat -ntpl
#################nfs###############
做之前还原系统到刚配置好yum源的状态
10节点:
setenforce 0
yum install -y nfs-utils rpcbind
vi /etc/exports
/mnt/test不换行 ip地址192.168.200.10/24(rw,no_root_squash,no_all_squash,sync,anonuid=501,anongid=501)
mkdir /mnt/test
exportfs -r
systemctl start nfs rpcbind
showmount -e 192.168.200.10
20节点:
setenforce 0
systemctl stop firewalld
yum install -y nfs-utils rpcbind
mount -t nfs 192.168.200.10:/mnt/test /mnt
检验:
20节点:
df -lh
cd /mnt
touch 1
mkdir 2
ll
10节点:
ll /mnt/test
###############网络配置##############
cd /etc/sysconfig/network-scripts
vi ifcfg-eno16777736(1804镜像-ens33)
改 BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.200.10
NETMASK=255.255.255.0
GATEWAY=192.168.200.2
DNS1=114.114.114.114
systemctl restart network
################yum源###############
cd /etc/yum.repos.d/
ls
rm -rf *
vi local.repo
[centos]
name=centos
baseurl=file:///mnt/centos
gpgcheck=0
enabled=1
mkdir /mnt/centos
mount /dev/sr0 /mnt/centos
yum clean all
yum repolist
20节点ftp协议:
cd /etc/yum.repos.d
ls
rm -rf *
vi local.repo
[centos]
name=centos
baseurl=ftp://10节点的ip地址/centos
gpgcheck=0
enabled=1
转到10节点:
先配置selinux和防火墙:
setenforce 0
systemctl stop firewalld
安装ftp服务:yum install -y vsftpd
vi /etc/vsftpd/vsftpd.conf
最后一行添加: anon_root=/mnt
systemctl restart vsftpd (编辑完配置文件一定是重启服务)
转到20节点:
yum clean all
yum repolist