rg1-ceph101.cloud.bjtp.qihoo.net
rg1-ceph102.cloud.bjtp.qihoo.net
rg1-ceph103.cloud.bjtp.qihoo.net
VIP:
10.205.51.194
虚拟机IP段:
10.205.144.0/24
部署docker和K8S在ceph的基础上 系统初始化每台都做 S3底层依赖于K8S
systemctl enable docker ## docker ships with the ceph package set installed earlier
wget http://pub1-bjyt.s3.360.cn/kubernetes/rke_linux-amd64 ## offline env: copy the file in manually instead
chmod +x rke_linux-amd64
mv rke_linux-amd64 /usr/bin/rke
wget http://pub1-bjyt.s3.360.cn/kubernetes/kubectl ## offline env: copy the file in manually instead
chmod +x kubectl
mv kubectl /usr/bin/
mkdir /etc/docker ## source installs do not create this directory; create it by hand
# Write the docker daemon config.
# NOTE: JSON does not allow comments — the original notes had an inline
# "##" remark inside the heredoc, which produced invalid JSON and made
# dockerd fail to start. The remark is preserved here instead:
# "insecure-registries" is the master node's registry IP:port; slave
# nodes also point at the master's IP:port.
# NOTE(review): "graph" is deprecated in newer Docker ("data-root"
# replaces it) — confirm the installed Docker version still accepts it.
cat <<EOF | sudo tee /etc/docker/daemon.json
{
  "registry-mirrors": ["https://8dexs4ag.mirror.aliyuncs.com"],
  "insecure-registries": ["10.19.214.141:5000"],
  "oom-score-adjust": -1000,
  "graph": "/data/docker",
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "3"
  },
  "live-restore": true,
  "init": true
}
EOF
systemctl daemon-reload  ## reload unit files
systemctl restart docker ## restart so the new daemon.json takes effect
swapoff -a                         ## disable swap now (required by kubernetes)
sed -i 's/.*swap.*/#&/' /etc/fstab ## comment out swap entries so it stays off after reboot
systemctl stop firewalld
systemctl disable firewalld
## Disable SELinux.
# The original notes only grep-checked the config and then pasted the
# desired file content as bare lines (a shell treats "SELINUX=disabled"
# as a no-op variable assignment, never editing the file). Actually
# switch the runtime mode and persist it in the config:
setenforce 0 2>/dev/null || true   ## runtime off; no-op if already disabled
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# verify: expect SELINUX=disabled and SELINUXTYPE=targeted
grep -E "^(SELINUX|SELINUXTYPE)=" /etc/selinux/config
## Kernel parameters required for kubernetes bridged traffic
modprobe br_netfilter
# persist the module across reboots (modprobe alone is lost on reboot)
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
# append only once, so re-running these notes does not duplicate entries
grep -q '^net.bridge.bridge-nf-call-iptables=1' /etc/sysctl.conf || cat >> /etc/sysctl.conf <<EOF
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
EOF
sysctl -p ## apply
yum install -y ipvsadm ipset ## usually already present from the ceph install; check with `which` first
## Load the ipvs kernel modules (needed by kube-proxy in ipvs mode).
## NOTE(review): nf_conntrack_ipv4 was renamed to nf_conntrack on kernels
## >= 4.19 — the modinfo guard below silently skips it there; confirm the
## module actually loads on this kernel.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack_ipv4"
for kernel_module in \${ipvs_modules}; do
/sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
if [ \$? -eq 0 ]; then
/sbin/modprobe \${kernel_module}
fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
docker load -i 360container.tar    ## offline env: import application images
docker load -i registry_latest.img ## offline env: import the registry image
mkdir -p /data/registry
# Start the local registry, mapping host port 4000 -> container port 5000
docker run -d -p 4000:5000 -v /data/registry:/var/lib/registry --restart=always --name registry registry:latest
# Retag every local image into the private registry and push it.
# Use `docker images --format` instead of grep/awk on the text table:
# the old parsing also picked up dangling <none>:<none> images, whose
# push then failed; quote "$image" against word-splitting.
for image in $(docker images --format '{{.Repository}}:{{.Tag}}' | grep -v '<none>'); do
  docker tag "$image" "10.19.114.190:4000/$image"
  docker push "10.19.114.190:4000/$image"
done
docker ps -a  ## confirm the registry container is up
docker images ## list the imported images
docker build -t 10.205.52.117:4000/rancher/rancher:pub-1.1 . ## optional
docker push 10.205.52.117:4000/rancher/rancher:pub-1.1       ## optional
##更新版本出现问题时
错误:systemctl: symbol lookup error: /usr/lib/systemd/libsystemd-shared-243.so: undefined symbol: seccomp_api_get
解决方法:yum -y install libseccomp
然后编写yaml文件,以360stack用户的身份运行。
# Work as the 360stack user; create the directory holding the rke cluster config
cd /home/360stack
mkdir rancher
cd rancher
# Fill in the cluster definition (fields listed in the notes below)
vim cluster.yml
address,
internal_address,
private_registries,prefix_path;
- '/mnt/fast-disks:/mnt/fast-disks:rshared' // 增加该配置,支持 S3 动态存储卷发现
rke up                 ## bring the cluster up from cluster.yml
mkdir -p /root/.kube   ## -p: do not fail if the directory already exists
cp kube_config_cluster.yml /root/.kube/config ## install the kubeconfig rke generated
kubectl get node       ## list k8s nodes
kubectl get all -A     ## overall cluster state across all namespaces
部署pika 主从 部署两台
主节点执行
tar zxvf pika-tob-deploy.tgz             ## unpack the pika bundle (copied in beforehand)
mv pika /root/                           ## move the extracted dir under /root
cd /root/pika/scripts
./pika_init_server -s /root/pika/package ## init the host; -s = pika package dir
# Use the absolute path: the pika conf lives at /data1/pika19221, and the
# original relative `mkdir data1` created it under /root/pika/scripts instead.
mkdir /data1
./pika_deploy -p 19221 -P 252617         ## deploy master; -p port, -P password
ps -ef | grep pika                       ## verify the pika process is up
/usr/local/redis40/bin/redis-cli -p 19221 -a 252617 ## log in to the master
# The start/stop words below were bare Chinese text in the original notes —
# executed as-is they would be passed as extra arguments to the scripts.
./pika_start -p 19221                    ## start
./pika_stop -p 19221                     ## stop
vim /etc/rc.d/rc.local                   ## autostart: append the next line (ensure rc.local is executable)
/root/pika/scripts/pika_start -p 19221   ## start command (this line goes into rc.local)
从节点执行:
mv pika /root/ ## copy the bundle in, extract it, move it under /root
cd pika/scripts/
mkdir /data1
./pika_init_server -s /root/pika/package/ ## init the host; -s = pika package dir
# -M is the MASTER node's IP; the original line had a bare "(主ip)" after
# the address, which is a shell syntax error.
./pika_deploy -p 19221 -P 252617 -M 10.205.51.245 ## deploy slave
ps -ef | grep pika ## verify the pika process started
/usr/local/redis40/bin/redis-cli -p 19221 -a 252617 info Replication ## check replication status
vim /data1/pika19221/pika19221.conf
# add this line manually inside the conf:  slaveof : 10.205.51.245:19221
vim /etc/rc.d/rc.local                 ## autostart on boot (ensure rc.local is executable)
/root/pika/scripts/pika_start -p 19221 ## start command (this line goes into rc.local)
reboot ## reboot master and slave, then verify replication comes back up