文章目录
最近深入研究k8s,打算用二进制安装包,逐个组件安装配置,环境起来之后,也方便后续了解源码时调试,以下是记录。
本机起3个centos 7.6 x64 4c8g虚拟机,分配如下
master
192.168.122.31 k8s1 etcd1
node
192.168.122.30 k8s2 etcd2
192.168.122.209 k8s3 etcd3
安装版本1.21,下载地址在官网:https://dl.k8s.io/v1.21.1/kubernetes-server-linux-amd64.tar.gz
其他版本:https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
国内装服务基本都会遇到官网镜像下载不下来的情况,我的方法是找国内源下载到本地后改名。
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.4.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.4.1 k8s.gcr.io/pause:3.4.1
安装etcd集群
vim /usr/lib/systemd/system/etcd.service
#文件内容如下,监听地址端口只支持ip地址,不支持hostname
[Unit]
Description=etcd server
After=network.target
[Service]
Type=notify
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/bin/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
配置参数
vi /etc/etcd/etcd.conf
#[member]
ETCD_NAME=etcd3
ETCD_DATA_DIR="/var/lib/etcd"
ETCD_LISTEN_CLIENT_URLS="http://192.168.122.209:2379,http://127.0.0.1:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.122.209:2379,http://127.0.0.1:2379"
#[cluster]
ETCD_LISTEN_PEER_URLS="http://192.168.122.209:2380"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.122.209:2380"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.122.31:2380,etcd2=http://192.168.122.30:2380,etcd3=http://192.168.122.209:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
所有节点操作
mkdir -p /var/lib/etcd
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
systemctl status etcd
etcdctl member list
查询etcd集群状态
ENDPOINTS=192.168.122.31:2379,192.168.122.30:2379,192.168.122.209:2379
etcdctl --write-out=table --endpoints=$ENDPOINTS endpoint status
生成k8s证书
不记得从哪个版本开始不支持http方式访问apiserver了,反正1.21是不支持的,所以需要ssl证书。
mkdir -p /etc/kubernetes/cert
cd /etc/kubernetes/cert/
openssl genrsa -out ca.key 2048
#/CN=master主机名
openssl req -x509 -new -nodes -key ca.key -subj "/CN=k8s1" -days 5000 -out ca.crt
openssl genrsa -out server.key 2048
vim master_ssl.cnf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
DNS.5 = k8s1
IP.1 = 10.244.0.1
IP.2 = 192.168.122.31
openssl req -new -key server.key -subj "/CN=k8s1" -config master_ssl.cnf -out server.csr
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 5000 -extensions v3_req -extfile master_ssl.cnf -out server.crt
openssl genrsa -out cs_client.key 2048
openssl req -new -key cs_client.key -subj "/CN=k8s1" -config master_ssl.cnf -out cs_client.csr
openssl x509 -req -in cs_client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 5000 -out cs_client.crt
二进制安装k8s master3个服务
安装apiserver
mkdir -p /etc/kubernetes
vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API server
After=network.target
[Service]
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/bin/kube-apiserver $KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
vim /etc/kubernetes/apiserver
KUBE_API_ARGS="--etcd-servers=http://192.168.122.31:2379,http://192.168.122.30:2379,http://192.168.122.209:2379 --client-ca-file=/etc/kubernetes/cert/ca.crt --tls-private-key-file=/etc/kubernetes/cert/server.key --tls-cert-file=/etc/kubernetes/cert/server.crt --kubelet-client-certificate=/etc/kubernetes/cert/cs_client.crt --kubelet-client-key=/etc/kubernetes/cert/cs_client.key --service-account-signing-key-file=/etc/kubernetes/cert/server.key --service-account-key-file=/etc/kubernetes/cert/server.key --service-account-issuer=https://kubernetes.default.svc --service-cluster-ip-range=10.244.0.0/16 --service-node-port-range=1-65535 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota --log-dir=/var/log/kubernetes --v=0 --allow-privileged=true"
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver
安装controller-manager
vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
After=kube-apiserver.service
Requires=kube-apiserver.service
[Service]
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
vim /etc/kubernetes/controller-manager
KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig --cluster-cidr=10.245.0.0/16 --service-account-private-key-file=/etc/kubernetes/cert/server.key --cluster-signing-cert-file=/etc/kubernetes/cert/ca.crt --cluster-signing-key-file=/etc/kubernetes/cert/ca.key --root-ca-file=/etc/kubernetes/cert/ca.crt --log-dir=/var/log/kubernetes --v=0"
vim /etc/kubernetes/kubeconfig
apiVersion: v1
clusters:
- cluster:
certificate-authority: /etc/kubernetes/cert/ca.crt
server: https://192.168.122.31:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: system:kube-controller-manager
name: system:kube-controller-manager@kubernetes
current-context: system:kube-controller-manager@kubernetes
kind: Config
preferences: {}
users:
- name: system:kube-controller-manager
user:
client-certificate: /etc/kubernetes/cert/cs_client.crt
client-key: /etc/kubernetes/cert/cs_client.key
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager
安装kube-scheduler
vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
After=kube-apiserver.service
Requires=kube-apiserver.service
[Service]
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
vim /etc/kubernetes/scheduler
KUBE_SCHEDULER_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig --log-dir=/var/log/kubernetes --v=0"
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler
node节点安装服务
node 安装docker
操作略
分发ca文件
scp ca.crt k8s2:/etc/kubernetes/cert/
scp ca.crt k8s3:/etc/kubernetes/cert/
scp ca.key k8s3:/etc/kubernetes/cert/
scp ca.key k8s2:/etc/kubernetes/cert/
生成kubelet证书
openssl genrsa -out kubelet_client.key 2048
openssl req -new -key kubelet_client.key -subj "/CN=192.168.122.30" -out kubelet_client.csr
openssl x509 -req -in kubelet_client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 5000 -out kubelet_client.crt
openssl genrsa -out kubelet_client.key 2048
openssl req -new -key kubelet_client.key -subj "/CN=192.168.122.209" -out kubelet_client.csr
openssl x509 -req -in kubelet_client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 5000 -out kubelet_client.crt
node安装kubelet
vim /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet Server
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet $KUBELET_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
mkdir -p /var/lib/kubelet
mkdir -p /etc/kubernetes
vim /etc/kubernetes/kubelet
KUBELET_ARGS="--kubeconfig=/etc/kubernetes/kubelet.conf --hostname-override=192.168.122.30 --cluster-dns=10.244.0.2 --cluster-domain=cluster.local --network-plugin=cni --log-dir=/var/log/kubernetes --v=0"
vim /etc/kubernetes/kubelet.conf
apiVersion: v1
clusters:
- cluster:
certificate-authority: /etc/kubernetes/cert/ca.crt
server: https://192.168.122.31:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
namespace: default
user: default-auth
name: default-context
current-context: default-context
kind: Config
preferences: {}
users:
- name: default-auth
user:
client-certificate: /etc/kubernetes/cert/kubelet_client.crt
client-key: /etc/kubernetes/cert/kubelet_client.key
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet
node安装kube-proxy
vim /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-proxy Server
After=network.target
Requires=network.target
[Service]
EnvironmentFile=-/etc/kubernetes/kube-proxy
ExecStart=/usr/bin/kube-proxy $KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
vim /etc/kubernetes/kube-proxy
KUBE_PROXY_ARGS="--kubeconfig=/etc/kubernetes/kubeproxy.config --log-dir=/var/log/kubernetes --v=2"
vi /etc/kubernetes/kubeproxy.config
apiVersion: v1
clusters:
- cluster:
certificate-authority: /etc/kubernetes/cert/ca.crt
server: https://192.168.122.31:6443
name: default
contexts:
- context:
cluster: default
user: default
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: default
user:
client-certificate: /etc/kubernetes/cert/kubelet_client.crt
client-key: /etc/kubernetes/cert/kubelet_client.key
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
master节点配置kubectl
其他节点装不装看个人需要
mkdir -p /root/.kube
vi $HOME/.kube/config
apiVersion: v1
clusters:
- cluster:
certificate-authority: /etc/kubernetes/cert/ca.crt
server: https://192.168.122.31:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
user:
client-certificate: /etc/kubernetes/cert/cs_client.crt
client-key: /etc/kubernetes/cert/cs_client.key
检查集群及节点状态
kubectl get node
kubectl get cs
网络组件
#下载calicoctl
# wget -O /usr/local/bin/calicoctl https://github.com/projectcalico/calicoctl/releases/download/v3.9.1/calicoctl
# chmod +x /usr/local/bin/calicoctl
kubectl apply -f calico.yaml
#配置cidr和controller-manager定义的Pod cidr相同
- name: CALICO_IPV4POOL_CIDR
value: "10.245.0.0/16"
kubectl apply -f calico.yaml
#默认启动是ipip隧道 node-to-node mesh模式
#我们改为rr模式
https://mritd.com/2019/06/18/calico-3.6-forward-network-traffic/
https://blog.51cto.com/u_14143894/2463392
vi bgp.yaml
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
name: default
spec:
logSeverityScreen: Info
nodeToNodeMeshEnabled: false
#获取node名称
calicoctl get node
#导出一个作为rr选择器的节点配置
calicoctl get node 192.168.122.30 -o yaml > node.yaml
#修改配置,增加选择器参数
apiVersion: projectcalico.org/v3
kind: Node
metadata:
annotations:
projectcalico.org/kube-labels: '{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"192.168.122.30","kubernetes.io/os":"linux"}'
creationTimestamp: "2021-06-08T04:05:06Z"
labels:
beta.kubernetes.io/arch: amd64
beta.kubernetes.io/os: linux
kubernetes.io/arch: amd64
kubernetes.io/hostname: 192.168.122.30
kubernetes.io/os: linux
route-reflector: "true" # 注意是增加这一行
name: 192.168.122.30
resourceVersion: "71198"
uid: f9b144a5-90e4-432e-8154-93e61bea683c
spec:
addresses:
- address: 192.168.122.30/24
- address: 192.168.122.30
bgp:
ipv4Address: 192.168.122.30/24
ipv4IPIPTunnelAddr: 172.16.228.192
orchRefs:
- nodeName: 192.168.122.30
orchestrator: k8s
status: {}
calicoctl apply -f node.yaml
vi rr.yaml
kind: BGPPeer
apiVersion: projectcalico.org/v3
metadata:
name: peer-to-rrs
spec:
nodeSelector: "!has(route-reflector)"
peerSelector: has(route-reflector)
---
kind: BGPPeer
apiVersion: projectcalico.org/v3
metadata:
name: rr-mesh
spec:
nodeSelector: has(route-reflector)
peerSelector: has(route-reflector)
#应用生效
calicoctl apply -f bgp.yaml
calicoctl apply -f rr.yaml
#确认是否生效
calicoctl node status
#其他机器(比如宿主机)访问需要加2条路由
sudo ip route add 10.245.0.0/16 via 192.168.122.30
sudo ip route add 10.244.0.0/16 via 192.168.122.30
安装coredns
./deploy.sh -i 10.244.0.2 -d test.com > coredns.yaml
kubectl apply -f coredns.yaml
#本机加一条dns server
vi /etc/resolv.conf
nameserver 10.244.0.2
#装个web图形
kubectl apply -f dashboard.yaml
#这个时候可以通过 https://<pod ip>:8443 访问dashboard
#安装ingress
kubectl apply -f ingress.yaml
kubectl delete -A ValidatingWebhookConfiguration ingress-nginx-admission
kubectl apply -f dashboard-ingress.yaml
vi dashboard-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: dashboard-ingress
namespace: kubernetes-dashboard
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
rules:
- host: "dashboard.xxx"
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: kubernetes-dashboard
port:
number: 443