Kubernetes Setup (1.24.4)
Server resources
Hostname | IP address | CPU (cores) | Memory (GB) | Disk (GB) |
---|---|---|---|---|
k8s-prd-master1 | 172.24.226.72 | 8 | 16 | 200+500 |
k8s-prd-master2 | 172.24.226.73 | 8 | 16 | 200 |
k8s-prd-master3 | 172.24.226.74 | 8 | 16 | 200 |
k8s-prd-vip (VIP) | 172.24.226.101 | - | - | - |
Docker images (offline preparation):
1. Find a server with internet access and install Docker on it.
2. docker pull <image_name>:<image_version> ## pull the required image
3. docker save -o <name.tar> <image_name>:<image_version> ## package the image into a tar file
4. Download the tar file to your workstation, then upload it to the offline servers.
5. ctr -n=k8s.io images import <name.tar> ## import the tar file into containerd (a concrete example follows)
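As a concrete example (the pause image is used purely for illustration; repeat the same steps for every image in the list printed later by kubeadm config images list):
# on the internet-connected server
docker pull registry.aliyuncs.com/google_containers/pause:3.7
docker save -o pause-3.7.tar registry.aliyuncs.com/google_containers/pause:3.7
# copy pause-3.7.tar to each offline node, then on each node:
ctr -n=k8s.io images import pause-3.7.tar
ctr -n=k8s.io images ls | grep pause   ## verify the import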
RPM packages (offline preparation):
1. Find a server with internet access.
2. Configure the following yum repo:
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
3. Clean and rebuild the yum cache:
yum clean all && yum makecache
4. Download (but do not install) the following components, saving them to /root/k8s-repo:
yum install -y kubelet-1.24.4 kubeadm-1.24.4 kubectl-1.24.4 --downloadonly --downloaddir=/root/k8s-repo --disableexcludes=kubernetes
5. Zip up the rpm packages:
zip k8s-repo.zip /root/k8s-repo/*
6. Download the zip to your workstation, then upload it to the offline servers (an install sketch follows).
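On each offline node the packages can later be installed straight from the uploaded files (this is used again in the "Kubernetes initialization" section below); a minimal sketch, assuming the zip was created as above and uploaded to /root:
unzip k8s-repo.zip -d /        ## restores the rpms under /root/k8s-repo
yum -y localinstall /root/k8s-repo/*.rpm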
Installing Kubernetes
1. Base environment setup (all nodes)
1. Set the hostnames (run the matching command on each node):
hostnamectl set-hostname k8s-prd-master1
hostnamectl set-hostname k8s-prd-master2
hostnamectl set-hostname k8s-prd-master3
2. Configure /etc/hosts resolution (all nodes):
cat >>/etc/hosts<< EOF
172.24.226.72 k8s-prd-master1
172.24.226.73 k8s-prd-master2
172.24.226.74 k8s-prd-master3
EOF
3. Disable swap, the firewall and SELinux:
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab ## disable the swap partition
setenforce 0 && sed -ri 's#(SELINUX=).*#\1disabled#' /etc/selinux/config ## disable SELinux
systemctl disable firewalld && systemctl stop firewalld ## disable the firewall
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X ## flush the iptables rules
4. Allow forwarding in iptables:
iptables -P FORWARD ACCEPT
5. Upgrade the kernel:
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
yum -y localinstall kernel-ml*
grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
reboot ## reboot, then verify the new kernel with uname -r
6. Enable the br_netfilter module
modprobe br_netfilter && modprobe overlay && modprobe ip_conntrack ## load the modules for the current session
# load them automatically at boot
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
cat >> /etc/rc.sysinit <<'EOF'
#!/bin/bash
for file in /etc/sysconfig/modules/*.modules ; do
  [ -x $file ] && $file
done
EOF
echo "modprobe br_netfilter" >/etc/sysconfig/modules/br_netfilter.modules && \
echo "modprobe ip_conntrack" >/etc/sysconfig/modules/ip_conntrack.modules && \
chmod 755 /etc/sysconfig/modules/br_netfilter.modules && \
chmod 755 /etc/sysconfig/modules/ip_conntrack.modules
7. Tune kernel parameters:
cat >> /etc/sysctl.d/k8s.conf <<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.max_map_count=262144
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl -p /etc/sysctl.d/k8s.conf
8. Raise the system file-descriptor limits:
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655350
* hard nofile 655350
* soft nproc 655350
* hard nproc 655350
* soft core unlimited
* hard core unlimited
EOF
9. Install the containerd runtime
Download containerd:
wget https://github.com/containerd/containerd/releases/download/v1.6.10/cri-containerd-1.6.10-linux-amd64.tar.gz
Extract it:
tar -xf cri-containerd-1.6.10-linux-amd64.tar.gz -C /
containerd -v
runc -h ## if this errors, the libseccomp version shipped with the OS is too old for this runc
Download a libseccomp package newer than 2.4 and install it:
wget http://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm
yum -y install libseccomp-2.5.1-1.el8.x86_64.rpm
runc -v ## the runc command now works
mkdir /etc/containerd
10. Configure containerd
If /etc/containerd/config.toml does not exist yet, generate the default one first: containerd config default > /etc/containerd/config.toml
Then edit /etc/containerd/config.toml and add config_path at around line 145:
144 [plugins."io.containerd.grpc.v1.cri".registry]
145 config_path = "/etc/containerd/certs.d"
146
147 [plugins."io.containerd.grpc.v1.cri".registry.auths]
148
149 [plugins."io.containerd.grpc.v1.cri".registry.configs]
150
151 [plugins."io.containerd.grpc.v1.cri".registry.headers]
152
153 [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
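Depending on the environment, two other settings in the same file are commonly adjusted as well (a sketch; the exact file line numbers will differ): SystemdCgroup = true so containerd matches the kubelet's systemd cgroup driver, and sandbox_image pointing at a pause image the offline nodes can actually resolve, e.g.:
[plugins."io.containerd.grpc.v1.cri"]
  sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.7"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true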
11. Restart containerd:
systemctl restart containerd && systemctl enable containerd
2. Kubernetes initialization
1. Install kubeadm, kubelet and kubectl with yum (using the rpm packages prepared at the beginning of this document; offline, yum -y localinstall /root/k8s-repo/*.rpm achieves the same result)
yum install -y kubelet-1.24.4 kubeadm-1.24.4 kubectl-1.24.4 --disableexcludes=kubernetes
kubeadm version ## check the kubeadm version
systemctl enable kubelet
2. Install and configure HAProxy and Keepalived
$ yum install keepalived haproxy -y
# Run on all master nodes; adjust the master node IP addresses at the end of the file
$ vi /etc/haproxy/haproxy.cfg
global
maxconn 2000
ulimit-n 16384
log 127.0.0.1 local0 err
stats timeout 30s
defaults
log global
mode http
option httplog
timeout connect 5000
timeout client 50000
timeout server 50000
timeout http-request 15s
timeout http-keep-alive 15s
frontend monitor-in
bind *:33305
mode http
option httplog
monitor-uri /monitor
frontend k8s-master
bind 0.0.0.0:7443
bind 127.0.0.1:7443
mode tcp
option tcplog
tcp-request inspect-delay 5s
default_backend k8s-master
backend k8s-master
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server k8s-prd-master1 172.24.226.72:6443 check
server k8s-prd-master2 172.24.226.73:6443 check
server k8s-prd-master3 172.24.226.74:6443 check
# On k8s-prd-master1; set mcast_src_ip to the actual master1 IP and virtual_ipaddress to the LB (VIP) address
$ vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
interface eth0
mcast_src_ip 172.24.226.72
virtual_router_id 60
priority 101
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
172.24.226.101
}
track_script {
chk_apiserver
}
}
# On k8s-prd-master2 and k8s-prd-master3, create /etc/keepalived/keepalived.conf as well; change mcast_src_ip to the local node's IP, set state to BACKUP and lower the priority (virtual_ipaddress stays the same VIP)
# On the k8s-prd-master2 node
$ cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
mcast_src_ip 172.24.226.73
virtual_router_id 60
priority 100
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
172.24.226.101
}
track_script {
chk_apiserver
}
}
# Configure the Keepalived health-check script on all master nodes:
$ cat /etc/keepalived/check_apiserver.sh
#!/bin/bash
err=0
for k in $(seq 1 3)
do
check_code=$(pgrep haproxy)
if [[ $check_code == "" ]]; then
err=$(expr $err + 1)
sleep 1
continue
else
err=0
break
fi
done
if [[ $err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
# Start haproxy and keepalived on all master nodes
$ chmod +x /etc/keepalived/check_apiserver.sh
$ systemctl daemon-reload
$ systemctl enable --now haproxy
$ systemctl enable --now keepalived
# Test that the LB VIP is reachable
$ telnet 172.24.226.101 7443
3. Generate the init configuration file
kubeadm config print init-defaults > kubeadm.yaml
$ cat kubeadm.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: 7t2weq.bjbawausm0jaxury
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.24.226.72
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-prd-master1
---
apiServer:
  certSANs:
  - 172.24.226.101
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 172.24.226.101:7443
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.24.4
networking:
  dnsDomain: cluster.local
  podSubnet: 100.125.0.0/16
  serviceSubnet: 1.1.1.0/24
scheduler: {}
4. Pull the images Kubernetes needs
# List the required images (the repository prefix follows the imageRepository set in kubeadm.yaml)
kubeadm config images list --config kubeadm.yaml
registry.aliyuncs.com/google_containers/kube-apiserver:v1.24.4
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.24.4
registry.aliyuncs.com/google_containers/kube-scheduler:v1.24.4
registry.aliyuncs.com/google_containers/kube-proxy:v1.24.4
registry.aliyuncs.com/google_containers/pause:3.7
registry.aliyuncs.com/google_containers/etcd:3.5.3-0
registry.aliyuncs.com/google_containers/coredns:v1.8.6
# Pull these images on the internet-connected server, then copy them to the offline nodes and import them (sketch below)
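A minimal sketch, following the docker-based procedure from the top of this document (it assumes kubeadm and kubeadm.yaml are also present on the internet-connected server; otherwise pull the seven images from the list above by name, and treat k8s-images.tar as an illustrative file name):
# on the internet-connected server (docker installed)
for img in $(kubeadm config images list --config kubeadm.yaml); do docker pull $img; done
docker save -o k8s-images.tar $(kubeadm config images list --config kubeadm.yaml)
# copy k8s-images.tar to every node, then on each node:
ctr -n=k8s.io images import k8s-images.tar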
5. Initialize the cluster
kubeadm init --config kubeadm.yaml --upload-certs
# If initialization succeeds, output similar to the following is printed
...
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by running the following command on each as root:
kubeadm join 172.24.226.101:7443 --token 7t2weq.bjbawausm0jaxury --discovery-token-ca-cert-hash sha256:b0d875f1dafe9f479b23603c3424cad5e0e3aa0a47a8274f9d24432e97e3dbde --control-plane --certificate-key 0ea981458813160b6fbc572d415e14cbc28c4bf958a765a7bc989b7ecc5dcdd6
# Next, follow the printed instructions to configure the kubectl client credentials
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
# Alternatively, as the root user you can simply run
export KUBECONFIG=/etc/kubernetes/admin.conf
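The remaining control-plane nodes still have to be joined. On k8s-prd-master2 and k8s-prd-master3, run the kubeadm join command printed by your own init output (token, hash and certificate key will differ from the example above), then check membership from master1:
kubeadm join 172.24.226.101:7443 --token <token> --discovery-token-ca-cert-hash sha256:<hash> --control-plane --certificate-key <key>
kubectl get node   ## all three masters should appear (NotReady until the network plugin below is installed)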
3. Kubernetes configuration
1. Deploy the network plugin (flannel)
# Download the flannel manifest
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Edit the manifest to pin the network interface name; around line 159 of the file, add one line
vi kube-flannel.yml
...
150 containers:
151 - name: kube-flannel
152 #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
153 image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
154 command:
155 - /opt/bin/flanneld
156 args:
157 - --ip-masq
158 - --kube-subnet-mgr
159 - --iface=eth0 # on multi-NIC machines, name the internal NIC explicitly; if omitted, flannel picks the first NIC
160 resources:
161 requests:
162 cpu: "100m"
163 memory: "50Mi"
...
Modify the pod network CIDR so it matches podSubnet in kubeadm.yaml:
...
98 net-conf.json: |
99 {
100 "Network": "100.125.0.0/16",
101 "Backend": {
102 "Type": "vxlan"
103 }
104 }
...
# Install flannel
kubectl apply -f kube-flannel.yml
kubectl -n kube-flannel get po -owide
2. Cluster configuration
# Make the master nodes schedulable (optional); run for each master node name (k8s-prd-master1/2/3)
kubectl taint node <node-name> node-role.kubernetes.io/master:NoSchedule-
kubectl taint node <node-name> node-role.kubernetes.io/control-plane:NoSchedule-
Set up kubectl auto-completion:
yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
3. Extend certificate expiry (upload the update-kubeadm-cert.sh script to the node, then run it)
cd /etc/kubernetes/pki
# Check current certificate validity
for i in $(ls *.crt); do echo "===== $i ====="; openssl x509 -in $i -text -noout | grep -A 3 'Validity' ; done
mkdir backup_key; cp -rp ./* backup_key/
yum -y install git
git clone https://github.com/yuyicai/update-kube-cert.git
cd update-kube-cert/
bash update-kubeadm-cert.sh all
4. Deploy metrics-server (needed for HPA)
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.1/components.yaml
vi components.yaml
# Add the - --kubelet-insecure-tls argument
...
133 containers:
134 - args:
135 - --cert-dir=/tmp
136 - --secure-port=4443
- --kubelet-insecure-tls
137 - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
138 - --kubelet-use-node-status-port
139 - --metric-resolution=15s
140 image: bitnami/metrics-server:0.6.1
141 imagePullPolicy: IfNotPresent
...
kubectl apply -f components.yaml
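Once the metrics-server pod is running, resource metrics should become available (this can take a minute or two after the apply):
kubectl -n kube-system get po | grep metrics-server
kubectl top node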
5. Deploy the ingress-nginx plugin
$ wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml
## Adjust how the controller is deployed
$ vim deploy.yaml
...
405 ---
406 apiVersion: apps/v1
407 kind: DaemonSet ## change the workload type from Deployment to DaemonSet
408 metadata:
409 labels:
410 app.kubernetes.io/component: controller
411 app.kubernetes.io/instance: ingress-nginx
412 app.kubernetes.io/name: ingress-nginx
413 app.kubernetes.io/part-of: ingress-nginx
414 app.kubernetes.io/version: 1.4.0
415 name: ingress-nginx-controller
416 namespace: ingress-nginx
...
# Add a toleration (tolerate all taints so the controller can also run on the master nodes)
...
425 template:
426 metadata:
427 labels:
428 app.kubernetes.io/component: controller
429 app.kubernetes.io/instance: ingress-nginx
430 app.kubernetes.io/name: ingress-nginx
431 spec:
## add the following two lines
432 tolerations:
433 - operator: "Exists"
434 containers:
435 - args:
436 - /nginx-ingress-controller
437 - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
438 - --election-id=ingress-controller-leader
439 - --controller-class=k8s.io/ingress-nginx
440 - --ingress-class=nginx
441 - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
442 - --validating-webhook=:8443
443 - --validating-webhook-certificate=/usr/local/certificates/cert
444 - --validating-webhook-key=/usr/local/certificates/key
...
# Replace the image references with mirrors reachable offline
sed -i 's#registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f#myifeng/registry.k8s.io_ingress-nginx_kube-webhook-certgen:v1.3.0#g' deploy.yaml
sed -i 's#registry.k8s.io/ingress-nginx/controller:v1.4.0@sha256:34ee929b111ffc7aa426ffd409af44da48e5a0eea1eb2207994d9e0c0882d143#myifeng/registry.k8s.io_ingress-nginx_controller:v1.4.0#g' deploy.yaml
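After editing, apply the manifest and check that the controller DaemonSet comes up on the nodes:
kubectl apply -f deploy.yaml
kubectl -n ingress-nginx get po -owide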
6. Deploy an NFS StorageClass
1. NFS server configuration
1. Add a disk
After attaching the new disk, rescan for it without rebooting:
ls /sys/class/scsi_host
host0 host1 host2
Rescan the SCSI bus:
echo "- - -" > /sys/class/scsi_host/host0/scan
echo "- - -" > /sys/class/scsi_host/host1/scan
echo "- - -" > /sys/class/scsi_host/host2/scan
2. Create an LV
Create the PV: pvcreate /dev/sdb
Create the VG: vgcreate vg_name /dev/sdb
Create the LV: lvcreate -L 100G -n lv_name vg_name
Create an xfs filesystem: mkfs.xfs /dev/vg_name/lv_name
Or create an ext4 filesystem: mkfs.ext4 /dev/vg_name/lv_name
Mount it:
umount -v /dev/sdb1 ## unmount by device (only if something is already mounted)
umount -v /data ## unmount by mount point (only if something is already mounted)
mkdir /data ## create the mount point
mount /dev/vg_name/lv_name /data ## temporary mount
echo "/dev/vg_name/lv_name /data xfs defaults 0 0">>/etc/fstab ## permanent mount (use ext4 instead of xfs to match the filesystem created above)
3. Install and configure NFS
# Install
yum -y install nfs-utils rpcbind
# Shared directory (the same /data path is used by the provisioner below)
mkdir -p /data && chmod 755 /data
echo '/data *(insecure,rw,sync,no_root_squash)'>>/etc/exports
systemctl enable rpcbind && systemctl start rpcbind
systemctl enable nfs && systemctl start nfs
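Verify that the export is visible (run on the NFS server itself):
showmount -e localhost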
2. NFS client configuration
# On the client nodes
yum -y install nfs-utils rpcbind
mkdir -p /data
mount -t nfs 172.24.226.72:/data /data
echo "mount -t nfs 172.24.226.72:/data /data" >>/etc/rc.local
3. Configure the StorageClass (dynamic NFS provisioner)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
      - name: nfs-client-provisioner
        image: registry.cn-beijing.aliyuncs.com/mydlq/nfs-subdir-external-provisioner:v4.0.0
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
        - name: PROVISIONER_NAME
          value: nfs.com/nfs
        - name: NFS_SERVER
          value: 172.24.226.72
        - name: NFS_PATH
          value: /data
      volumes:
      - name: nfs-client-root
        nfs:
          server: 172.24.226.72
          path: /data
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-client-provisioner
  namespace: nfs-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
  name: nfs-client-provisioner
  namespace: nfs-provisioner
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: nfs-provisioner
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: nfs-provisioner
subjects:
- kind: ServiceAccount
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-provisioner
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    storageclass.kubernetes.io/is-default-class: "true" # set as the default StorageClass
  name: nfs
provisioner: nfs.com/nfs
parameters:
  archiveOnDelete: "true"
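Save the manifests above to a file (nfs-provisioner.yaml is just an illustrative name), make sure the nfs-provisioner namespace exists, then apply and verify:
kubectl create namespace nfs-provisioner
kubectl apply -f nfs-provisioner.yaml
kubectl -n nfs-provisioner get po
kubectl get storageclass   ## "nfs" should be listed and marked (default)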
7. Install Helm
wget https://get.helm.sh/helm-v3.8.0-linux-amd64.tar.gz
tar -xf helm-v3.8.0-linux-amd64.tar.gz
mv helm /usr/local/bin/
helm version ## verify
Installing Harbor
1. Download the chart (run on an internet-connected server with Kubernetes and Helm installed)
# Add the harbor chart repository
$ helm repo add harbor https://helm.goharbor.io
# Search for the harbor chart
$ helm search repo harbor
# Pull the chart locally so it can be inspected and customized offline
$ helm pull harbor/harbor
Download the chart package to your workstation, then upload it to the offline server.
Unpack it:
tar -xf harbor-1.12.2.tgz
2. Customize the configuration
1. Generate a self-signed HTTPS certificate for Harbor
1. Install the cfssl certificate tools
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl*
2. Get the default configuration templates
cfssl print-defaults config > ca-config.json
cfssl print-defaults csr > ca-csr.json
3. Generate the CA certificate
Change the contents of ca-config.json to:
{
"signing": {
"default": {
"expiry": "876000h"
},
"profiles": {
"harbor": {
"expiry": "876000h",
"usages": [
"signing",
"key encipherment",
"server auth"
]
}
}
}
}
Change ca-csr.json to:
{
"CN": "CA",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "hangzhou",
"L": "hangzhou",
"O": "harbor",
"OU": "System"
}
]
}
Generate the CA certificate:
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
4. Issue the Harbor certificate
Create harbor-csr.json with the following contents:
{
"CN": "harbor",
"hosts": [
"harbor.com.cn",
"*.harbor.com.cn"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "US",
"ST": "CA",
"L": "San Francisco",
"O": "harbor",
"OU": "System"
}
]
}
Sign the Harbor certificate with the CA:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=harbor harbor-csr.json | cfssljson -bare harbor
5. Create a Secret from the certificate (the harbor namespace must already exist; it is also created in step 3 below, so run kubectl create namespace harbor first if needed)
kubectl -n harbor create secret generic harbor-tls --from-file=tls.crt=harbor.pem --from-file=tls.key=harbor-key.pem --from-file=ca.crt=ca.pem
kubectl -n harbor get secret -owide ## check that the secret was created
2. Edit the Harbor chart values
cd harbor
vi values.yaml
# Configure the self-signed certificate
1 expose:
2 # Set how to expose the service. Set the type as "ingress", "clusterIP", "nodePort" or "loadBalancer"
3 # and fill the information in the corresponding section
4 type: ingress
5 tls:
6 # Enable TLS or not.
7 # Delete the "ssl-redirect" annotations in "expose.ingress.annotations" when TLS is disabled and "expose.type" is "ingress"
8 # Note: if the "expose.type" is "ingress" and TLS is disabled,
9 # the port must be included in the command when pulling/pushing images.
10 # Refer to https://github.com/goharbor/harbor/issues/5291 for details.
11 enabled: true
12 # The source of the tls certificate. Set as "auto", "secret"
13 # or "none" and fill the information in the corresponding section
14 # 1) auto: generate the tls certificate automatically
15 # 2) secret: read the tls certificate from the specified secret.
16 # The tls certificate can be generated manually or by cert manager
17 # 3) none: configure no tls certificate for the ingress. If the default
18 # tls certificate is configured in the ingress controller, choose this option
19 certSource: secret
20 auto:
21 # The common name used to generate the certificate, it's necessary
22 # when the type isn't "ingress"
23 commonName: "harbor.harbor.com.cn"
24 secret:
25 # The name of secret which contains keys named:
26 # "tls.crt" - the certificate
27 # "tls.key" - the private key
28 secretName: "harbor-tls"
29 # The name of secret which contains keys named:
30 # "tls.crt" - the certificate
31 # "tls.key" - the private key
32 # Only needed when the "expose.type" is "ingress".
33 notarySecretName: ""
# Configure ingress access
34 ingress:
35 hosts:
36 core: harbor.harbor.com.cn
37 notary: harbor.harbor.com.cn
38 # set to the type of ingress controller if it has specific requirements.
39 # leave as `default` for most ingress controllers.
40 # set to `gce` if using the GCE ingress controller
41 # set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller
42 # set to `alb` if using the ALB ingress controller
43 # set to `f5-bigip` if using the F5 BIG-IP ingress controller
44 controller: default
45 ## Allow .Capabilities.KubeVersion.Version to be overridden while creating ingress
46 kubeVersionOverride: ""
47 className: "nginx"
# externalURL is the web entry point; it must match the ingress hostname (line 131)
131 externalURL: https://harbor.harbor.com.cn
# Persistence: reuse an existing PVC backed by NFS
208 persistentVolumeClaim:
209 registry:
210 # Use the existing PVC which must be created manually before bound,
211 # and specify the "subPath" if the PVC is shared with other components
212 existingClaim: "harbor-data"
213 # Specify the "storageClass" used to provision the volume. Or the default
214 # StorageClass will be used (the default).
215 # Set it to "-" to disable dynamic provisioning
216 storageClass: ""
217 subPath: "registry"
218 accessMode: ReadWriteOnce
219 size: 400Gi
220 annotations: {}
221 jobservice:
222 jobLog:
223 existingClaim: "harbor-data"
224 storageClass: ""
225 subPath: "jobservice"
226 accessMode: ReadWriteOnce
227 size: 10Gi
228 annotations: {}
229 # If external database is used, the following settings for database will
230 # be ignored
231 database:
232 existingClaim: "harbor-data"
233 storageClass: ""
234 subPath: "database"
235 accessMode: ReadWriteOnce
236 size: 20Gi
237 annotations: {}
238 # If external Redis is used, the following settings for Redis will
239 # be ignored
240 redis:
241 existingClaim: "harbor-data"
242 storageClass: ""
243 subPath: "redis"
244 accessMode: ReadWriteOnce
245 size: 5Gi
246 annotations: {}
247 trivy:
248 existingClaim: "harbor-data"
249 storageClass: ""
250 subPath: "trivy"
251 accessMode: ReadWriteOnce
252 size: 20Gi
253 annotations: {}
# Administrator account password
366 harborAdminPassword: "Harbor12345!"
# Trivy and Notary (vulnerability scanning / signing) components are not enabled for now
610 trivy:
611 # enabled the flag to enable Trivy scanner
612 enabled: false
613 image:
614 # repository the repository for Trivy adapter image
615 repository: goharbor/trivy-adapter-photon
616 # tag the tag for Trivy adapter image
617 tag: v2.8.2
683 notary:
684 enabled: false
685 server:
686 # set the service account to be used, default if left empty
687 serviceAccountName: ""
688 # mount the service account token
689 automountServiceAccountToken: false
690 image:
691 repository: goharbor/notary-server-photon
692 tag: v2.8.2
3. Create the namespace and PVC
# Create the namespace and the PVC
kubectl create namespace harbor
cat harbor-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: harbor-data
  namespace: harbor
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 400Gi
  storageClassName: nfs
# Create the PVC
kubectl create -f harbor-pvc.yaml
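Confirm that the claim binds before installing the chart (the nfs StorageClass above provisions the volume dynamically):
kubectl -n harbor get pvc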
4. Install Harbor
helm -n harbor install harbor ./harbor
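Watch the release come up and note the ingress host (it should match the externalURL configured above):
kubectl -n harbor get po
kubectl -n harbor get ingress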
Using Harbor from clients
1. Configure the local containerd
# Configure the registry
mkdir -p /etc/containerd/certs.d/harbor.harbor.com.cn
cat >/etc/containerd/certs.d/harbor.harbor.com.cn/hosts.toml <<EOF
server = "https://harbor.harbor.com.cn"
[host."https://harbor.harbor.com.cn"]
capabilities = ["pull", "resolve", "push"]
skip_verify = true
EOF
# Reload the configuration and restart containerd
systemctl daemon-reload && systemctl restart containerd
2. Push an image
# Tag the image
ctr -n k8s.io i tag registry.aliyuncs.com/google_containers/pause:3.7 harbor.harbor.com.cn/test/pause:3.7
Push the image to Harbor:
ctr -n=k8s.io images push harbor.harbor.com.cn/test/pause:3.7 --skip-verify --user admin:Harbor12345
3. Let Kubernetes pull images from Harbor
# Create an image-pull secret
kubectl create secret docker-registry harbor \
--docker-server=harbor.harbor.com.cn \
--docker-username=admin \
--docker-password=Harbor12345
# Reference the secret in the pod spec; note that imagePullSecrets is a sibling of containers, not nested inside it:
spec:
  containers:
  - name: nginx
    image: nginx:alpine
    imagePullPolicy: IfNotPresent
  imagePullSecrets:
  - name: harbor
4. Using Harbor with Docker
cat /etc/docker/daemon.json
# Add the Harbor registry address to Docker's insecure-registries (trusted) list
{
  "registry-mirrors": ["https://ggb52j62.mirror.aliyuncs.com"],
  "insecure-registries": ["harbor.harbor.com.cn"]
}
# Then restart Docker
systemctl daemon-reload && systemctl restart docker
# Tag the image
docker tag registry.aliyuncs.com/google_containers/pause:3.7 harbor.harbor.com.cn/test/pause:3.7
# Log in to the Harbor registry
docker login harbor.harbor.com.cn
# Push (or pull) the image
docker push harbor.harbor.com.cn/test/pause:3.7
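Pulling works the same way once logged in; the path below matches the tag pushed above:
docker pull harbor.harbor.com.cn/test/pause:3.7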