系统初始化
资源 :
* 镜像仓库
* k8s-master01 k8s-node01 k8s-node02
1 设置 系统 主机名以及host文件的相互解析
hostnamectl set-hostname k8s-node02
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-master01
2 安装依赖包
[root@k8s-node02 ~]# yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git |
3 关掉 防火墙 和 selinux
4 设置 防火墙 为 iptables
[root@k8s-node02 ~]# yum -y install iptables-services [root@k8s-node02 ~]# systemctl start iptables [root@k8s-node02 ~]# systemctl enable iptables Created symlink from /etc/systemd/system/basic.target.wants/iptables.service to /usr/lib/systemd/system/iptables.service. [root@k8s-node02 ~]# iptables -F [root@k8s-node02 ~]# service iptables save |
5 禁用 交换分区
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
[root@k8s-node02 ~]# free -mh total used free shared buff/cache available Mem: 1.8G 105M 1.3G 9.5M 373M 1.5G Swap: 0B 0B 0B |
调整 内核参数
[root@k8s-node02 ~]# cat > kubernetes.conf <<EOF > net.bridge.bridge-nf-call-iptables=1 > net.bridge.bridge-nf-call-ip6tables=1 > net.ipv4.ip_forward=1 > net.ipv4.tcp_tw_recycle=0 > vm.swappiness=0 # 禁止使用 swap 空间,只有当系统 OOM 时才允许使用它 > vm.overcommit_memory=1 # 不检查物理内存是否够用 > vm.panic_on_oom=0 # 开启 OOM > fs.inotify.max_user_instances=8192 > fs.inotify.max_user_watches=1048576 > fs.file-max=52706963 > fs.nr_open=52706963 > net.ipv6.conf.all.disable_ipv6=1 > net.netfilter.nf_conntrack_max=2310720 > EOF [root@k8s-node02 ~]# cat kubernetes.conf
[root@k8s-node02 ~]# cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
// 当前版本没有,升级到4.0之后才会有 [root@k8s-node02 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf |
调整系统时区
[root@k8s-master01 ~]# timedatectl set-timezone Asia/Shanghai [root@k8s-master01 ~]# timedatectl set-local-rtc 0 [root@k8s-master01 ~]# systemctl restart rsyslog [root@k8s-master01 ~]# systemctl restart crond |
关闭系统不需要的服务
[root@k8s-node01 ~]# systemctl stop postfix && systemctl disable postfix Removed symlink /etc/systemd/system/multi-user.target.wants/postfix.service. |
设置 rsyslogd 和 systemd journald
[root@k8s-master01 ~]# mkdir /var/log/journal # 持久化保存日志的目录 [root@k8s-master01 ~]# mkdir /etc/systemd/journald.conf.d [root@k8s-master01 ~]# cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF > [Journal] > # 持久化保存到磁盘 > Storage=persistent > # 压缩历史日志 > Compress=yes > SyncIntervalSec=5m > RateLimitInterval=30s > RateLimitBurst=1000 > # 最大占用空间 10G > SystemMaxUse=10G > # 单日志文件最大 200M > SystemMaxFileSize=200M > # 日志保存时间 2 周 > MaxRetentionSec=2week > # 不将日志转发到 syslog > ForwardToSyslog=no > EOF [root@k8s-master01 ~]# systemctl restart systemd-journald
升级系统内核为 4.4
CentOS 7.x 系统自带的 3.10.x 内核存在一些 Bugs,导致运行的 Docker、Kubernetes 不稳定
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm # 安装完成后检查 /boot/grub2/grub.cfg 中对应内核 menuentry 中是否包含 initrd16 配置,如果没有,再安装 一次! yum --enablerepo=elrepo-kernel install -y kernel-lt # 设置开机从新内核启动 grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)'
|
*****************************
Kubeadm 部署安装
开启 kube-proxy ipvs 的前置条件
[root@k8s-master01 ~]# modprobe br_netfilter [root@k8s-master01 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF > #!/bin/bash > modprobe -- ip_vs > modprobe -- ip_vs_rr > modprobe -- ip_vs_wrr > modprobe -- ip_vs_sh > modprobe -- nf_conntrack_ipv4 > EOF [root@k8s-master01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
|
安装 docker 软件
yum install -y yum-utils device-mapper-persistent-data lvm2
#卸载老版本的 docker 及其相关依赖 yum remove docker docker-common container-selinux docker-selinux docker-engine #更新yum yum update #安装 yum-utils,它提供了 yum-config-manager,可用来管理yum源 yum install -y yum-utils # 添加yum源 yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo # 更新索引 yum makecache fast #安装 docker-ce yum install -y docker-ce
## 创建 /etc/docker 目录 mkdir /etc/docker # 配置 daemon. cat > /etc/docker/daemon.json <<EOF { "exec-opts": ["native.cgroupdriver=systemd"], "log-driver": "json-file", "log-opts": { "max-size": "100m" } } EOF mkdir -p /etc/systemd/system/docker.service.d # 重启docker服务 systemctl daemon-reload && systemctl restart docker && systemctl enable docker
|
安装 kubeadm (主从配置)
cat <<EOF > /etc/yum.repos.d/kubernetes.repo [kubernetes] name=Kubernetes baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64 enabled=1 gpgcheck=0 repo_gpgcheck=0 gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg EOF yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1 systemctl enable kubelet.service |
初始化主节点
注意: kubeadm 初始化节点,需要科学上网,如果没有科学上网,想办法把需要的镜像导入进 docker
初始化一个配置文件
kubeadm config print init-defaults > kubeadm-config.yaml
主要 修改了
advertiseAddress: 192.168.43.120
为了安装使用 flannel 方便
podSubnet: 10.244.0.0/16
加多了一个字段 ,为了开启 ipvs
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
SupportIPVSProxyMode: true
mode: ipvs
[root@k8s-master01 ~]# cat kubeadm-config.yaml apiVersion: kubeadm.k8s.io/v1beta2 bootstrapTokens: - groups: - system:bootstrappers:kubeadm:default-node-token token: abcdef.0123456789abcdef ttl: 24h0m0s usages: - signing - authentication kind: InitConfiguration localAPIEndpoint: advertiseAddress: 192.168.43.120 bindPort: 6443 nodeRegistration: criSocket: /var/run/dockershim.sock name: k8s-master01 taints: - effect: NoSchedule key: node-role.kubernetes.io/master --- apiServer: timeoutForControlPlane: 4m0s apiVersion: kubeadm.k8s.io/v1beta2 certificatesDir: /etc/kubernetes/pki clusterName: kubernetes controllerManager: {} dns: type: CoreDNS etcd: local: dataDir: /var/lib/etcd imageRepository: k8s.gcr.io kind: ClusterConfiguration kubernetesVersion: v1.15.1 networking: dnsDomain: cluster.local podSubnet: 10.244.0.0/16 serviceSubnet: 10.96.0.0/12 scheduler: {} --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration featureGates: SupportIPVSProxyMode: true mode: ipvs |
开始 初始化,把日志 输出到 kubeadm-init.log
kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
加入主节点 以及 其余工作节点
执行安装日志中的加入命令即可
比如
vi kubeadm-init.log
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.43.120:6443 --token abcdef.0123456789abcdef \ --discovery-token-ca-cert-hash sha256:97f04b294d466172dffc1eb1eb0f1888be2848bf208201780710a21b1594fce2 |
部署 网络
因为 k8s 要求一个扁平化的 Pod 网络,所以必须先部署网络插件,节点才能进入 Ready 状态
# Deploy the flannel CNI add-on so nodes can reach Ready state.
# Fix: the manifest URL was accidentally wrapped across two lines ("kube-" /
# "flannel.yml"), which breaks the command; it must be a single argument.
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml