【Ubuntu 20】搭建 Kubernetes 集群

主机规划

作用	IP地址	操作系统	配置
Master	192.168.244.130	ubuntu20.04	2颗CPU 2G内存 50G硬盘
Node1	192.168.244.131	ubuntu20.04	2颗CPU 2G内存 50G硬盘
Node2	192.168.244.132	ubuntu20.04	2颗CPU 2G内存 50G硬盘

环境初始化

主机名设置:按照下面信息设置主机名
master节点: master
node节点:   node1
node节点:   node2
# Set the hostname — run ONLY the matching line on each node.
# "&& bash" starts a fresh shell so the prompt immediately shows the new name.
hostnamectl set-hostname master && bash
hostnamectl set-hostname node1 && bash
hostnamectl set-hostname node2 && bash

检查操作系统的版本

[root@master ~]# uname -a
Linux master 5.15.0-105-generic #115~20.04.1-Ubuntu SMP Mon Apr 15 17:33:04 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux

设置主机名 、解析

# 主机名解析:编辑三台服务器的/etc/hosts文件,添加下面内容
192.168.244.130  master
192.168.244.131  node1
192.168.244.132  node2

# Append static name resolution for all three nodes to /etc/hosts
# (run on every node)
cat >> /etc/hosts <<EOF
192.168.244.130  master
192.168.244.131  node1
192.168.244.132  node2
EOF

时间同步

# Check sync state: "System clock synchronized: yes" means NTP sync is active
timedatectl status
# Set the timezone to Asia/Shanghai if it is not already correct
timedatectl set-timezone Asia/Shanghai
# Install chrony for network time sync.
# FIX: on Ubuntu/Debian the systemd unit is named "chrony" — "chronyd" is
# the RHEL/CentOS unit name, so "systemctl enable --now chronyd" fails here.
apt install chrony -y && systemctl enable --now chrony

查看防火墙

# 查看防火墙状态
[root@master ~]# ufw status
[root@master ~]# ufw enable
[root@master ~]# ufw disable

禁用selinux

#Ubuntu 默认不安装 SELinux;如果没有 selinux 命令和配置文件则说明没有安装 SELinux,下面两步就不用做了
[root@master ~]# sed -ri 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config 
[root@master ~]# setenforce 0

关闭swap分区(k8s官网要求)

# Disable swap as kubelet requires (comment out the fstab entry so it stays
# off after reboot, then turn it off now).
# NOTE: ideally do not create a swap partition when installing the VM.
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a

将桥接的IPv4流量传递到iptables的链

# Make bridged IPv4 traffic visible to iptables: kube-proxy and most CNI
# plugins rely on the br_netfilter hook so bridged pod traffic is filtered
# rather than silently bypassing the host firewall.
# FIX: load the kernel modules first — without br_netfilter the net.bridge.*
# sysctl keys do not exist and "sysctl --system" reports
# "No such file or directory" for them.
modprobe overlay
modprobe br_netfilter
# FIX: overwrite (>) instead of touch + append (>>) so re-running this step
# never duplicates entries in the file.
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.ipv4.ip_forward=1
vm.swappiness=0
EOF

sysctl --system

设置服务器之间免密登陆(3台彼此之间均设置)

# 如果root 用户被禁止(Permission denied, please try again.)两种可能
# 1.SSH 配置了禁止 root 用户登录策略;
	#修改 /etc/ssh/sshd_config配置,允许登录
	#vim  /etc/ssh/sshd_config 文件中找到PermitRootLogin 行:PermitRootLogin yes
	#重启ssh  service sshd restart
# 2.root密码错误
	#先切换普通用户登录测试   输入 su 命令 输入你认为正确的密码 如果验证失败就是密码不对 更改root新密码 sudo passwd root
	#这里我们需要搞清楚这两条命令的区别,使用sudo su命令只是暂时提升用户的权限(暂时获取root权限),用到的密码其实还是当前普通账户的密码,但是su默认是切换到root账户,用的就是root账户的密码,所以你很有可能是没有设置root用户的密码。

# Generate an RSA key pair non-interactively (no passphrase, default path);
# skip if one already exists so re-running does not overwrite it.
[ -f /root/.ssh/id_rsa ] || ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa
# Push the public key to every node so all three machines can reach each
# other without a password (run this section on each of the three nodes;
# the original copied to only two hosts and prompted interactively).
for host in 192.168.244.130 192.168.244.131 192.168.244.132; do
  ssh-copy-id -i /root/.ssh/id_rsa.pub "root@${host}"
done
# Verify password-less login
ssh master
ssh node2

#重启服务器
[root@master ~]# reboot

安装docker


# 1 卸载旧版本docker
[root@master ~]# apt-get remove docker docker-engine docker.io containerd runc

# 2 更新软件包
[root@master ~]# sudo apt update
[root@master ~]# sudo apt upgrade

# 3 安装docker依赖
# Docker在Ubuntu上依赖一些软件包。执行以下命令来安装这些依赖:
[root@master ~]# apt-get install ca-certificates curl gnupg lsb-release

# 4 添加Docker官方GPG密钥 
# 执行以下命令来添加Docker官方的GPG密钥:
[root@master ~]# curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -

#添加Docker软件源  
[root@master ~]# sudo add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"

#安装docker
[root@master ~]# apt-get install docker-ce docker-ce-cli containerd.io

#配置用户组(可选) 注:重新登录才能使更改生效。
[root@master ~]# sudo usermod -aG docker $USER


# 6 检查docker状态和版本
[root@master ~]# systemctl status docker 
[root@master ~]# docker version

安装kubernetes组件

安装Containerd(所有节点执行)

#安装Containerd(所有节点执行)
从 k8s 1.25 开始使用 containerd 来作为底层容器支持,根据 k8s 和 containerd 的匹配要求,这里我们使用 containerd 1.7.0
# Download and unpack containerd 1.7.0 (matches k8s 1.25 per the
# containerd/kubernetes compatibility matrix); run on all nodes.
wget -c https://github.com/containerd/containerd/releases/download/v1.7.0/containerd-1.7.0-linux-amd64.tar.gz
tar -xzvf containerd-1.7.0-linux-amd64.tar.gz
# The archive's bin/ directory holds all the containerd executables
mv bin/* /usr/local/bin/

## Manage containerd with systemd.
# The unit file below is the upstream one published at
# https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
# FIX: write it straight into the systemd unit directory — the original
# touched an empty file, wrote the unit into the current directory and then
# moved it, which is three steps for one. The 'EOF' delimiter is quoted so
# nothing inside the unit text is subject to shell expansion.

#start
cat <<'EOF' | sudo tee /usr/lib/systemd/system/containerd.service
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd

Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target

EOF
#end

# Register, enable at boot, start now, and confirm it is running
systemctl daemon-reload && systemctl enable --now containerd
systemctl status containerd

# Install runc — the low-level OCI runtime containerd invokes for the
# actual container lifecycle commands (init, run, create, ps, ...).
curl -LO https://github.com/opencontainers/runc/releases/download/v1.1.1/runc.amd64 && install -m 755 runc.amd64 /usr/local/sbin/runc

# Install the CNI plugins
wget -c https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
# /opt/cni/bin is the standard directory CNI plugins are looked up in
mkdir -p /opt/cni/bin
tar -xzvf  cni-plugins-linux-amd64-v1.1.1.tgz -C /opt/cni/bin/

# Adjust containerd's configuration — by default it pulls the pause image
# from the upstream k8s registry, which is unreachable from mainland China.
# Directory for containerd's config file
mkdir -p /etc/containerd
# Dump the default configuration to the config file
containerd config default | sudo tee /etc/containerd/config.toml
# Edit the configuration (the next two lines show the values to change)
vim /etc/containerd/config.toml
sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"	 # search for sandbox_image; replace k8s.gcr.io/pause:3.6 with this Aliyun mirror
SystemdCgroup = true		# search for SystemdCgroup and flip false -> true (NOT systemd_cgroup — easy to confuse and causes a cascade of errors later)
 
# Create the per-registry mirror config directory for docker.io
mkdir /etc/containerd/certs.d/docker.io -pv
# Configure a pull mirror.
# FIX: "server" must be the real upstream registry API endpoint,
# https://registry-1.docker.io — "https://docker.io" is a website, not a
# registry endpoint, so fallback pulls through it fail.
# NOTE(review): hosts.toml is only honoured when config.toml sets
# [plugins."io.containerd.grpc.v1.cri".registry]
#   config_path = "/etc/containerd/certs.d"
# — confirm that key is set in /etc/containerd/config.toml.
cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://registry-1.docker.io"
[host."https://b9pmyelo.mirror.aliyuncs.com"]
  capabilities = ["pull", "resolve"]
EOF
 
# Ensure the kernel modules containerd needs are loaded at every boot
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# Restart containerd so it picks up the edited config
systemctl restart containerd 
systemctl status containerd

# Smoke-test containerd: pull, create, start, inspect and remove a container
ctr i pull docker.io/library/nginx:alpine		# a successful pull means registry access works
ctr images ls									# list images
ctr c create --net-host docker.io/library/nginx:alpine nginx # create a container
ctr task start -d nginx							# start it; success means containerd is healthy
ctr containers ls 								# list containers
ctr tasks kill -s SIGKILL  nginx				# stop the container
ctr containers rm nginx							# remove the container

配置kubernetes的阿里云apt源(所有节点执行)

# Back up the stock apt sources, then replace them with the Aliyun mirror
# for Ubuntu 20.04 (focal); run on all nodes.
cp /etc/apt/sources.list /etc/apt/sources.list.backup

cat > /etc/apt/sources.list <<EOF			
deb https://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse
deb https://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse
deb https://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse
deb https://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse
EOF

apt update
apt install apt-transport-https ca-certificates -y
# Convenience tooling for the rest of the guide
apt install vim lsof net-tools zip unzip tree wget curl bash-completion pciutils gcc make lrzsz tcpdump bind9-utils -y
# Append the Aliyun Kubernetes apt repository to the sources file
echo 'deb https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial main' >> /etc/apt/sources.list
# Import the repository signing key.
# FIX: "apt-key add" takes a filename argument and only reads the piped key
# when that argument is "-" (stdin); without it the key is never imported
# and the following "apt update" fails signature verification.
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
# Refresh package lists with the new repo
apt update

安装kubeadm、kubelet和kubectl(所有节点执行)

# Install kubeadm, kubelet and kubectl on all three nodes.
# List the kubeadm versions apt can see; we pin 1.25.0 here
# (omitting the version would install the latest).
apt-cache madison  kubeadm
# Install the pinned versions on every node
apt install -y kubelet=1.25.0-00 kubeadm=1.25.0-00 kubectl=1.25.0-00
# Enable kubelet at boot — do not start it now; it cannot run until
# "kubeadm init" on the master generates its config and pulls it up.
systemctl enable kubelet
# Hold the packages so a routine apt upgrade never moves the cluster version
sudo apt-mark hold kubelet kubeadm kubectl

初始化master节点控制面板(master节点执行)

# List the images kubeadm needs so they can be pulled ahead of init
kubeadm  config images list --kubernetes-version=v1.25.0 --image-repository=registry.aliyuncs.com/google_containers
# Make doubly sure swap is off before init
swapoff -a
vim /etc/fstab # comment out the swap mount line so it stays off after reboot
kubeadm  config images pull --kubernetes-version=v1.25.0 --image-repository=registry.aliyuncs.com/google_containers
# Pulled images land in containerd's "k8s.io" namespace by default.
# If the pull above fails, check which images kubeadm actually needs:
    # kubeadm config images list
    registry.k8s.io/kube-apiserver:v1.25.16
    registry.k8s.io/kube-controller-manager:v1.25.16
    registry.k8s.io/kube-scheduler:v1.25.16
    registry.k8s.io/kube-proxy:v1.25.16
    registry.k8s.io/pause:3.8
    registry.k8s.io/etcd:3.5.4-0
    registry.k8s.io/coredns/coredns:v1.9.3
    # Pull the same images from the Aliyun mirror with docker.
    # NOTE(review): the v1.25.16 tags below come from a newer kubeadm than
    # the v1.25.0 used elsewhere in this guide — match the tags to YOUR
    # "kubeadm config images list" output.
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.25.16
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.25.16
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.25.16
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.25.16
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.8
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.4-0
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.9.3

    # Re-tag the mirror images to the exact names kubeadm expects
    # (careful: some tags carry a leading "v" and some do not)
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.25.16 registry.k8s.io/kube-apiserver:v1.25.16
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.25.16 registry.k8s.io/kube-controller-manager:v1.25.16
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.25.16 registry.k8s.io/kube-scheduler:v1.25.16
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.25.16 registry.k8s.io/kube-proxy:v1.25.16
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.8 registry.k8s.io/pause:3.8
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.4-0 registry.k8s.io/etcd:3.5.4-0
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.9.3 registry.k8s.io/coredns/coredns:v1.9.3
    
    # Remove the mirror-named duplicates after tagging
    docker rmi xxxx

# Initialize the control plane ("kubeadm init --help" documents every flag).
# If init fails, "kubeadm reset" wipes cluster state back to pristine.
# --service-cidr / --pod-network-cidr must not overlap the host network;
# the pod CIDR below must match the CNI config applied later.
kubeadm init \
--apiserver-advertise-address=192.168.244.130 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.25.0 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16

#最后kubeadm init初始化成功,提示信息如下:
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
  export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.244.130:6443 --token j5032u.086ye89o88tv09vu \
        --discovery-token-ca-cert-hash sha256:646ab6b7e1cbac31430835a3f1e70ba1258da33a49b6d923787f63d8f8591c9d
 
# Run the post-init commands exactly as kubeadm printed them so kubectl
# can talk to the new cluster (the export line is the root-user shortcut)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf

node节点加入k8s集群(node节点执行)

# Paste the "kubeadm join" line that kubeadm init printed on the master.
# NOTE: the token expires after 24h; regenerate a fresh join command with
# "kubeadm token create --print-join-command" on the master.
# Run on each worker node:
kubeadm join 192.168.244.130:6443 --token j5032u.086ye89o88tv09vu \
        --discovery-token-ca-cert-hash sha256:646ab6b7e1cbac31430835a3f1e70ba1258da33a49b6d923787f63d8f8591c9d

# 查看集群状态 此时的集群状态为NotReady,这是因为还没有配置网络插件
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES    AGE     VERSION
master   NotReady   control-plane   68m   v1.25.0
node1    NotReady   <none>          16m   v1.25.0
node2    NotReady   <none>          16m   v1.25.0

部署容器网络

kubernetes支持多种网络插件,比如flannel、calico、canal等等,任选一种使用即可,本次选择calico
# Download the calico manifest
wget https://docs.projectcalico.org/manifests/calico.yaml
# Uncomment the two lines below in the manifest and set the value to the
# pod CIDR passed to kubeadm init (--pod-network-cidr)
vim calico.yaml
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"
# Apply the manifest (assuming the calico images can be pulled)
kubectl apply -f calico.yaml 	
#主节点再次执行 发现集群状态为Ready 如果没有及时更新 稍等会
[root@master ~]# kubectl get nodes 
NAME     STATUS     ROLES    AGE     VERSION
master   Ready    control-plane   105m   v1.25.0
node1    Ready    <none>          53m    v1.25.0
node2    Ready    <none>          53m    v1.25.0

# To run kubectl on a worker node, copy the master's
# /etc/kubernetes/admin.conf to the node and set up the kubeconfig there.
# scp is used here for the transfer.
scp /etc/kubernetes/admin.conf root@192.168.244.131:/etc/kubernetes/
# Set up the kubeconfig on the node
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

配置kubectl命令自动补全( master节点配置即可)

# Enable kubectl tab completion in bash (master node is enough)
apt install -y bash-completion
echo 'source /usr/share/bash-completion/bash_completion' >> ~/.bashrc
echo 'source  <(kubectl completion bash)' >> ~/.bashrc
source ~/.bashrc
# Quick check that completion and the cluster connection work
kubectl describe node

测试集群

# Smoke-test the cluster with an httpd deployment
kubectl create deployment httpd --image=httpd
# Expose it via NodePort on port 80 (other ports may be blocked by a firewall)
kubectl expose deployment httpd --port=80 --type=NodePort
# Confirm the pod is Running and note the NodePort assigned to service/httpd
kubectl get pod,svc
NAME                         READY   STATUS    RESTARTS   AGE
pod/httpd-757fb56c8d-w42l5   1/1     Running   0          39s
NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
service/httpd        NodePort    10.100.113.207   <none>        80:31561/TCP   42s			#外部端口31561
service/kubernetes   ClusterIP   10.96.0.1     <none>        443/TCP        3h22m
 
#网页测试访问,使用master节点的IP或者node节点的IP都可以访问,端口就是31561
http://192.168.244.130:31561/
It works!							#成功了

# Extras
# Check that every system pod (calico, coredns, etcd, ...) is Running
kubectl get pods -n kube-system

NAME                                       READY   STATUS    RESTARTS         AGE
calico-kube-controllers-74677b4c5f-dcfd5   1/1     Running   10 (4m48s ago)   18h
calico-node-2qtrz                          1/1     Running   1 (26m ago)      18h
calico-node-jql5t                          1/1     Running   1 (24m ago)      17h
calico-node-nzq7p                          1/1     Running   1 (25m ago)      18h
coredns-c676cc86f-4lvnb                    1/1     Running   1 (25m ago)      19h
coredns-c676cc86f-g4jgn                    1/1     Running   1 (25m ago)      19h
etcd-master                                1/1     Running   2 (26m ago)      19h
kube-apiserver-master                      1/1     Running   3 (26m ago)      19h
kube-controller-manager-master             1/1     Running   13 (16h ago)     19h
kube-proxy-8wxx9                           1/1     Running   1 (25m ago)      18h
kube-proxy-qkbxl                           1/1     Running   1 (26m ago)      19h
kube-proxy-ws8q5                           1/1     Running   1 (24m ago)      17h
kube-scheduler-master                      1/1     Running   12 (26m ago)     19h

# Inspect kubelet logs when troubleshooting node problems
journalctl -xeu kubelet


#至此,kubernetes的集群环境搭建完成

参考:https://blog.csdn.net/weixin_37672801/article/details/135901793

  • 7
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值