Deploying k8s from scratch

1. Machine environment preparation

hostname      ip             spec
k8s-master    192.168.0.104  2 CPU / 4 GB
k8s-node1     192.168.0.111  2 CPU / 4 GB
k8s-node2     192.168.0.112  2 CPU / 4 GB
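
If you do not run internal DNS, it can help to map these hostnames on every node so they resolve to one another (an optional sketch; adjust to your addresses):

cat >> /etc/hosts <<EOF
192.168.0.104 k8s-master
192.168.0.111 k8s-node1
192.168.0.112 k8s-node2
EOF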

1.1 Machine initialization script

#!/usr/bin/bash
while read -p "Enter the hostname to set: " name
do
	if [ -z "$name" ];then
		echo "Nothing was entered, please try again"
		continue
	fi
	read -p "Use this hostname? [y/n]: " var

	if [ "$var" == 'y' -o "$var" == 'yes' ];then
		hostnamectl set-hostname "$name"
		break
	fi
done

echo "关闭防火墙"
systemctl stop firewalld
systemctl disable firewalld

echo "关闭selinux"
setenforce 0
sed -ri '/^SELINUX=/ s/enforcing/disabled/'  /etc/selinux/config

echo "解决sshd远程连接慢的问题"
sed -ri '/^GSSAPIAu/ s/yes/no/' /etc/ssh/sshd_config
sed -ri '/^#UseDNS/ {s/^#//;s/yes/no/}' /etc/ssh/sshd_config

echo "配置yum源"
if [ ! -d /etc/yum.repos.d/backup ];then
	mkdir /etc/yum.repos.d/backup
fi
mv /etc/yum.repos.d/* /etc/yum.repos.d/backup 2>/dev/null

if ! ping -c2 www.baidu.com &>/dev/null	
then
	echo "您无法上外网,不能配置yum源,请联系老师"
	exit	
fi
if rpm -qa |grep wget &>/dev/null ;then
	wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo &>/dev/null
else
	curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo &>/dev/null 
fi

echo "安装系统需要的软件,请稍等"
yum -y install tree wget bash-completion vim lftp &>/dev/null

echo "配置epel源,安装高级软件"
if [ ! -f /etc/yum.repos.d/epel.repo ];then
	wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo &>/dev/null
	exit
fi
yum -y install atop htop nethogs net-tools psmisc &>/dev/null
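
One way to run this script on all three nodes at once, assuming root ssh access and that it is saved as init.sh (a hypothetical filename); ssh -t keeps the hostname prompts interactive:

for host in 192.168.0.104 192.168.0.111 192.168.0.112; do
    scp init.sh root@$host:/tmp/ && ssh -t root@$host bash /tmp/init.sh
done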

1.2 Configure a static IP

vim /etc/sysconfig/network-scripts/ifcfg-ens33

# Example for the master node; IPADDR and UUID are machine-specific
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=yes
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
IPV6_PRIVACY=no
NAME=ens33
UUID=737e66ec-873d-4bb1-9067-145c7337a209
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.0.104
PREFIX=24
GATEWAY=192.168.0.1
DNS1=114.114.114.114
DNS2=8.8.8.8
DNS3=202.101.172.35

systemctl restart network
systemctl enable network
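
You can verify that the static address and default route took effect:

ip addr show ens33
ip route | grep default
ping -c2 114.114.114.114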

2. Install and configure Docker and k8s on all three nodes

2.1 Docker

# Step 1: install the required system utilities
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: add the repo definition
sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3: point the repo at the Aliyun mirror
sudo sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# Step 4: refresh the cache and install Docker CE
sudo yum makecache fast
sudo yum -y install docker-ce
# Step 5: start the Docker service
sudo service docker start

Edit /etc/docker/daemon.json and add the following so Docker uses the systemd cgroup driver, matching the kubelet:

vim /etc/docker/daemon.json

{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
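
Restart Docker (and enable it at boot) so the cgroup-driver change takes effect, then confirm the active driver:

sudo systemctl daemon-reload
sudo systemctl restart docker
sudo systemctl enable docker
docker info | grep -i cgroup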

2.2 Install k8s at a pinned version

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubelet-1.18.2-0 kubeadm-1.18.2-0 kubectl-1.18.2-0 --disableexcludes=kubernetes
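
Enable the kubelet so it starts on boot; kubeadm generates its configuration during init/join:

systemctl enable kubelet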


3. Pull the images k8s needs and re-tag them. Save the following shell script and run it to download the images:

#!/bin/bash
# Pull each control-plane image from the Aliyun mirror, then
# re-tag it under k8s.gcr.io, where kubeadm looks by default.
images=(kube-apiserver:v1.18.2
        kube-controller-manager:v1.18.2
        kube-scheduler:v1.18.2
        kube-proxy:v1.18.2
        pause:3.2
        etcd:3.4.3-0
        coredns:1.6.7)
for image_name in "${images[@]}"
do
        docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$image_name
        docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$image_name k8s.gcr.io/$image_name
done
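
After the script finishes, confirm that all seven images carry the k8s.gcr.io tag:

docker images | grep k8s.gcr.io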

4. Disable swap on every node

swapoff -a

echo "1" >/proc/sys/net/bridge/bridge-nf-call-iptables

5. Run kubeadm init. Note that --apiserver-advertise-address=192.168.0.104 must be the master node's IP address:

kubeadm init --kubernetes-version=v1.18.2 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.0.104 --ignore-preflight-errors=Swap --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers

On success, configure kubectl for your user as the init output instructs:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
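
A quick sanity check that the control plane is answering (the master will report NotReady until the network plugin from step 7 is installed):

kubectl cluster-info
kubectl get nodes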

6. When kubeadm init finishes, its output ends with a kubeadm join command. To add the worker nodes, run it on each node:

kubeadm join 192.168.0.104:6443 --token 0iheaf.binj4jssiok73a15 \
    --discovery-token-ca-cert-hash sha256:5a1cbce4f18b6b31a865e16c4c581baafea43487798825a8b2a18740c4f39a3a

7. Install the network plugin. Copy the content below verbatim into a file named kube-flannel.yml:

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  seLinux:
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: rancher/mirrored-flannelcni-flannel:v0.18.1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: rancher/mirrored-flannelcni-flannel:v0.18.1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate

8. Apply the network plugin; once it is running, all nodes should reach the Ready state:

kubectl apply -f kube-flannel.yml

9. Check pod status

kubectl get pods -n kube-system -o wide

10. Before joining a new node, copy /etc/cni/net.d/10-flannel.conflist from the master to the new node:

scp /etc/cni/net.d/10-flannel.conflist 192.168.0.106:/etc/cni/net.d/10-flannel.conflist
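
If /etc/cni/net.d does not exist yet on the new node, create it first, or the scp above will fail:

ssh 192.168.0.106 mkdir -p /etc/cni/net.d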

10.1 Initialize the node

# Copy $HOME/.kube/config from the master to each node
scp $HOME/.kube/config root@k8s-node1:~/
scp $HOME/.kube/config root@k8s-node2:~/
scp $HOME/.kube/config root@Jen-git:~/
# Then run the commands below on node1 and node2,
# otherwise kubectl will fail with:
# error: no configuration has been provided, try setting KUBERNETES_MASTER environment variable
mkdir -p $HOME/.kube
sudo mv $HOME/config $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

11. Join the worker nodes

kubeadm join 192.168.0.104:6443 --token ccxoyh.lhomkez88g11x2v6 \
    --discovery-token-ca-cert-hash sha256:5a1cbce4f18b6b31a865e16c4c581baafea43487798825a8b2a18740c4f39a3a
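
Bootstrap tokens expire after 24 hours by default, which is why the token here differs from the one in step 6. If the join fails with an invalid-token error, generate a fresh join command on the master:

kubeadm token create --print-join-command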

12. Restart the kubelet on the master node

systemctl restart kubelet

13. Check node status

kubectl get nodes