Kubernetes部署流程(学习版)

第一步 :准备环境

#配置网卡

vi /etc/sysconfig/network-scripts/ifcfg-ens33   # 网卡配置文件名因机器而异(如 ifcfg-eth0),请按实际网卡名填写
---------------------------------->
ONBOOT=yes
BOOTPROTO=static
IPADDR=192.168.1.251
NETMASK=255.255.252.0
GATEWAY=192.168.1.1
DNS1=8.8.8.8

#重启网卡

service network restart
vi /etc/selinux/config
SELINUX=disabled
setenforce 0   /生效关闭selinux
systemctl stop firewalld // 关闭防火墙
iptables -vnL // 查看状态
#其他虚拟机重复以上步骤

#修改主机名

vi /etc/hostname //修改主机name
hostname k8s-master // 修改生效
hostname k8s-node01 // 修改生效
hostname k8s-node02 // 修改生效

exit //退出

第二步 :自签Etcd SSL证书 及部署集群

mkdir k8s
mkdir k8s/k8s-cert -p
mkdir k8s/etcd-cert -p

yum install -y wget //下载安装wget

//CFSSL工具的安装
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
ls /usr/local/bin/cfssl*

ntpdate time.windows.com //更新系统时间

生成ca文件
# CA signing policy: a single "www" profile with a 10-year (87600h) expiry,
# usable for both server auth and client auth.
# NOTE: quotes must be plain ASCII double quotes — the original used curly
# "smart quotes", which produce invalid JSON that cfssl cannot parse.
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF

# Certificate signing request for the etcd cluster CA.
# NOTE: plain ASCII quotes are required — the original's curly quotes
# made this invalid JSON.
cat > ca-csr.json <<EOF
{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

# CSR for the etcd server certificate. "hosts" must list every IP the
# etcd members will be reached at, or TLS verification will fail.
# NOTE: plain ASCII quotes are required — the original's curly quotes
# made this invalid JSON.
cat > server-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "192.168.1.251",
    "192.168.1.253",
    "192.168.1.254"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server

mkdir soft

mkdir /opt/etcd/{cfg,bin,ssl} -p

下载、解压 etcd-v3.3.10-linux-amd64.tar.gz
tar zxvf etcd-v3.3.10-linux-amd64.tar.gz
mv etcd etcdctl /opt/etcd/bin/
ls /opt/etcd/bin/

// 上传etcd.sh /root/k8s/
chmod +x etcd.sh

cat etcd.sh
./etcd.sh etcd01 192.168.1.251 etcd02=https://192.168.1.253:2380,etcd03=https://192.168.1.254:2380

cp /root/k8s/etcd-cert/{ca,server-key,server}.pem /opt/etcd/ssl/
ls /opt/etcd/ssl/

systemctl start etcd
tail -f /var/log/messages    # 或:journalctl -f -u etcd.service

scp -r /opt/etcd/ root@192.168.1.253:/opt/
scp -r /usr/lib/systemd/system/etcd.service root@192.168.1.253:/usr/lib/systemd/system
// 修改对应node节点
vi /opt/etcd/cfg/etcd

systemctl daemon-reload
systemctl start etcd
tail /var/log/messages -f

// 检验Etcd集群状态
# Check etcd cluster health over TLS (run on any etcd node).
# Fixed: the --endpoints value must use plain ASCII double quotes; the
# original's curly quotes would be passed literally and break the URL list.
/opt/etcd/bin/etcdctl \
  --ca-file=/opt/etcd/ssl/ca.pem \
  --cert-file=/opt/etcd/ssl/server.pem \
  --key-file=/opt/etcd/ssl/server-key.pem \
  --endpoints="https://192.168.1.251:2379,https://192.168.1.253:2379,https://192.168.1.254:2379" \
  cluster-health

// 访问参数 --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.1.251:2379,https://192.168.1.253:2379,https://192.168.1.254:2379"

第三步 Node安装Docker
// 安装系统工具
yum install -y yum-utils device-mapper-persistent-data lvm2
// 配置官方源
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
// 更新并安装Docker-CE
yum makecache fast
yum -y install docker-ce
// 引用docker 加速
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io

//开启Docker服务
service docker start (start, stop, restart, try-restart, reload, force-reload, status)

// 其他节点安装docker复制第三步

第四步 部署kubernetes网络 -Flannel(没有其他需要只安装在Node节点)

//写入分配的子网段到etcd,供flannels使用
# Write the Pod network range into etcd for flanneld to consume.
# Fixed: both the flag value and the JSON payload must use plain ASCII
# quotes; the original's curly quotes produced an unparseable config.
/opt/etcd/bin/etcdctl \
  --ca-file=/opt/etcd/ssl/ca.pem \
  --cert-file=/opt/etcd/ssl/server.pem \
  --key-file=/opt/etcd/ssl/server-key.pem \
  --endpoints="https://192.168.1.251:2379,https://192.168.1.253:2379,https://192.168.1.254:2379" \
  set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'

//导入flannel.sh文件
chmod +x flannel.sh
mkdir /opt/kubernetes/{bin,cfg,ssl} -p
./flannel.sh https://192.168.1.251:2379,https://192.168.1.253:2379,https://192.168.1.254:2379

//导入flannel安装包并解压
tar zxvf flannel-v0.10.0-linux-amd64.tar.gz
mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/
systemctl start flanneld
ps -ef |grep flanneld // 查看flanneld是否启动
ps -ef |grep docker // 查看docker是否引用flannel分配的IP

vi /usr/lib/systemd/system/docker.service //
cat /run/flannel/subnet.env //

systemctl restart docker 重启docker
ps -ef |grep docker //已经引用flannel分配的IP

//将当前节点flannel 的配置文件copy到其他节点上
scp -r /opt/kubernetes/ root@192.168.1.254:/opt/
scp -r /usr/lib/systemd/system/{flanneld,docker}.service root@192.168.1.254:/usr/lib/systemd/system/

// 验证flannels是否部署成功
# Verify flannel registered its subnets in etcd.
# Fixed: ASCII quotes on --endpoints, and the example output that was fused
# onto the second command line has been moved into a comment.
/opt/etcd/bin/etcdctl \
  --ca-file=/opt/etcd/ssl/ca.pem \
  --cert-file=/opt/etcd/ssl/server.pem \
  --key-file=/opt/etcd/ssl/server-key.pem \
  --endpoints="https://192.168.1.251:2379,https://192.168.1.253:2379,https://192.168.1.254:2379" \
  ls /coreos.com/network/subnets

# Inspect a single subnet entry. Example output:
# {"PublicIP":"192.168.1.248","BackendType":"vxlan","BackendData":{"VtepMAC":"16:d9:18:a2:92:49"}}
/opt/etcd/bin/etcdctl \
  --ca-file=/opt/etcd/ssl/ca.pem \
  --cert-file=/opt/etcd/ssl/server.pem \
  --key-file=/opt/etcd/ssl/server-key.pem \
  --endpoints="https://192.168.1.251:2379,https://192.168.1.253:2379,https://192.168.1.254:2379" \
  get /coreos.com/network/subnets/172.17.60.0-24

第五步 部署master组件(部署到master节点)
//上传master.zip文件
yum install unzip -y
unzip master.zip

//上传kubernetes安装包
tar zxvf kubernetes-server-linux-amd64.tar.gz

mkdir -p /opt/kubernetes/{bin,cfg,ssl}

cp kube-apiserver kube-controller-manager kube-scheduler /opt/kubernetes/bin/

chmod +x apiserver.sh
./apiserver.sh 192.168.1.251 https://192.168.1.251:2379,https://192.168.1.253:2379,https://192.168.1.254:2379
vi /opt/kubernetes/cfg/kube-apiserver

# 修改日志输出方式并添加日志目录(注意:必须是 ASCII 的 "--",原文的全角破折号 "–" 会使参数失效)
KUBE_APISERVER_OPTS="--logtostderr=false \
--log-dir=/opt/kubernetes/logs \
mkdir /opt/kubernetes/logs
/opt/kubernetes/bin/kube-apiserver --help | grep logs

//上传k8s-cert.sh文件
vi k8s-cert.sh // 添加k8s所有的ip地址
bash k8s-cert.sh //生成相关配置文件
cp ca.pem ca-key.pem server.pem server-key.pem /opt/kubernetes/ssl/

// 上传kubuconfig.sh
chmod +x kubeconfig.sh

//执行下面命令
# Fixed bootstrap token shared between the apiserver's token auth file and
# the kubelet-bootstrap user.
# Fixed: the quotes around "system:kubelet-bootstrap" must be plain ASCII
# double quotes — curly quotes end up literally in the CSV and break auth.
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

cat token.csv //查看文件是否生成
mv token.csv /opt/kubernetes/cfg/
systemctl restart kube-apiserver
ps -ef |grep kube-apiserver

netstat -antp |grep 8080 // 查看监听状态

#---------------------- //日志排错
vi /opt/kubernetes/logs/kube-apiserver.INFO
source /opt/kubernetes/cfg/kube-apiserver
/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
#----------------------

chmod +x controller-manager.sh
cat controller-manager.sh
./controller-manager.sh 127.0.0.1
./scheduler.sh 127.0.0.1

cp /root/soft/kubernetes/server/bin/kubectl /usr/bin/

kubectl get cs //查看当前etcd节点的状态 和(controller-manager, scheduler)

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

// 上传kubeconfig.sh文件

vi kubeconfig.sh //删除以下
#----------------------

创建 TLS Bootstrapping Token

#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
#----------------------

vi kubeconfig.sh //添加以下
#----------------------
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008
#----------------------

bash kubeconfig.sh 192.168.1.251 /root/k8s/k8s-cert

scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.1.254:/opt/kubernetes/cfg/

cd /root/soft/kubernetes/server/bin/
scp kubelet kube-proxy root@192.168.1.253:/opt/kubernetes/bin/
scp kubelet kube-proxy root@192.168.1.254:/opt/kubernetes/bin/

第六步 部署node组件 kubelet
上传node.zip文件
unzip node.zip

bash kubelet.sh 192.168.1.253

vi /opt/kubernetes/cfg/kubelet // 修改文件日志路径

# 注意:必须是 ASCII 的 "--",原文的全角破折号 "–" 会使参数失效
KUBELET_OPTS="--logtostderr=false \
--log-dir=/opt/kubernetes/logs \

mkdir /opt/kubernetes/logs

systemctl restart kubelet

journalctl -u kubelet // 查看日志

第七步 部署node组件 kube-proxy

bash proxy.sh 192.168.1.253
ps -ef | grep proxy

// copy到其他node节点上
scp -r /opt/kubernetes/ root@192.168.1.254:/opt/
scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@192.168.1.254:/usr/lib/systemd/system/

// cd /opt/kubernetes/cfg 修改对应的ip节点
vi kubelet
vi kubelet.config
vi kube-proxy

cd /opt/kubernetes/ssl/ //删除上一个节点颁发的证书
rm -f *

kubectl get csr //获取请求签名(master节点执行命令)
kubectl certificate approve +请求签名 允许加入集群
// 如:kubectl certificate approve node-csr-WxFdb6gcM_uA7ySEtQ8bn16X04XBuKVoLz8L7Tlu4CM

kubectl get node //查看节点

第八步 部署管理页面

下载并修改Dashboard安装脚本
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta5/aio/deploy/recommended.yaml
修改recommended.yaml文件内容:

#增加直接访问端口
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort      # added: expose the dashboard outside the cluster
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30008 # added: fixed node port for direct access
  selector:
    k8s-app: kubernetes-dashboard

##因为自动生成的证书很多浏览器无法使用,
##所以我们自己创建,注释掉kubernetes-dashboard-certs对象声明
#---
#apiVersion: v1
#kind: Secret
#metadata:
#  labels:
#    k8s-app: kubernetes-dashboard
#  name: kubernetes-dashboard-certs
#  namespace: kubernetes-dashboard
#type: Opaque



创建证书
mkdir dashboard-certs

cd dashboard-certs/

创建key文件

# Generate a 2048-bit RSA private key for the dashboard.
openssl genrsa -out dashboard.key 2048

# Create the certificate signing request.
# Fixed: the -subj argument must use plain ASCII single quotes; the
# original's curly quotes would be passed into the subject string.
openssl req -days 36000 -new -out dashboard.csr -key dashboard.key -subj '/CN=dashboard-cert'

# Self-sign the certificate with the same key.
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt

# Create the kubernetes-dashboard-certs secret from the key + cert.
# NOTE(review): the kubernetes-dashboard namespace must already exist at
# this point — create it (or apply recommended.yaml) first.
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard

安装Dashboard

#安装
kubectl create -f ~/recommended.yaml

#创建命名空间(注:recommended.yaml 中已包含该命名空间的定义,此步若报 AlreadyExists 可忽略;
#若需单独创建,应在执行 kubectl create -f recommended.yaml 及创建 secret 之前进行)
kubectl create namespace kubernetes-dashboard

#检查结果
[root@k8s-master ~]# kubectl get service -n kubernetes-dashboard -o wide

NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
dashboard-metrics-scraper ClusterIP 10.96.113.127 8000/TCP 16s k8s-app=dashboard-metrics-scraper
kubernetes-dashboard NodePort 10.96.203.158 443:30008/TCP 16s k8s-app=kubernetes-dashboard

新建一个yaml文件:
#创建账号:
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: dashboard-admin
  namespace: kubernetes-dashboard


#为用户分配权限:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin-bind-cluster-role
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kubernetes-dashboard

保存退出后执行
kubectl create -f dashboard-admin.yaml // 查看并复制用户Token

登录Dashboard
访问:https://192.168.1.253:30008(任一 node 节点 IP 均可),选择Token登录,复制刚才生成的密钥。
注意,IP为任意node节点的对外的IP.

第九步 Kubernetes Dashboard 设置用户密码登陆

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值