Kubernetes Cluster Setup

Lab Environment

Three virtual machines set up in VMware Fusion.

Name     IP
master1  192.168.0.108
node1    192.168.0.109
node2    192.168.0.110

Setup Steps

Install Docker
yum -y install wget
#replace the default repos with the Aliyun CentOS 7 mirror
rm -rf /etc/yum.repos.d/*.repo
cd /etc/yum.repos.d/
wget http://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all
yum makecache
yum repolist
yum -y install docker
systemctl start docker
systemctl enable docker.service
systemctl stop firewalld
systemctl disable firewalld
sed -i 's/SELINUX=enforcing/SELINUX=permissive/g' /etc/sysconfig/selinux
setenforce 0
mkdir -p /etc/docker
cat << EOF >  /etc/docker/daemon.json
{
  "registry-mirrors": ["https://registry.docker-cn.com"]
}
EOF
systemctl restart docker
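To confirm the registry mirror was picked up, a quick check of docker info works on most Docker versions (the exact output format varies by release):
docker info | grep -A 1 -i 'registry mirrors'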
Install Kubernetes components
#on master1
yum install kubernetes-master etcd flannel -y
#on node1 and node2
yum install kubernetes-node etcd flannel -y
#make sure etcd is present on master1, node1, and node2
yum -y install etcd
Configure etcd on master1
#quote the delimiter so the shell does not expand the ${ETCD_*} variables at write time
cat << 'EOF' > /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=root
ExecStart=/usr/bin/etcd  \
    --name=${ETCD_NAME} \
    --data-dir=${ETCD_DATA_DIR} \
    --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS} \
    --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
    --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
    --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
    --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
    --initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE} \
    --initial-cluster=${ETCD_INITIAL_CLUSTER}
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
cat << EOF > /etc/etcd/etcd.conf
#[Member]
ETCD_DATA_DIR="/var/lib/etcd-jingtai/data"
ETCD_LISTEN_PEER_URLS="http://192.168.0.108:2380"
ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.0.108:2379"
ETCD_NAME="master"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.0.108:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.0.108:2379"
ETCD_INITIAL_CLUSTER="master=http://192.168.0.108:2380,slave1=http://192.168.0.109:2380,slave2=http://192.168.0.110:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster001"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
mkdir -p /var/lib/etcd-jingtai/data
systemctl daemon-reload
systemctl enable etcd
Configure etcd on node1
#quote the delimiter so the shell does not expand the ${ETCD_*} variables at write time
cat << 'EOF' > /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=root
ExecStart=/usr/bin/etcd  \
    --name=${ETCD_NAME} \
    --data-dir=${ETCD_DATA_DIR} \
    --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS} \
    --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
    --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
    --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
    --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
    --initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE} \
    --initial-cluster=${ETCD_INITIAL_CLUSTER}
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
cat << EOF > /etc/etcd/etcd.conf
#[Member]
ETCD_DATA_DIR="/var/lib/etcd-jingtai/data"
ETCD_LISTEN_PEER_URLS="http://192.168.0.109:2380"
ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.0.109:2379"
ETCD_NAME="slave1"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.0.109:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.0.109:2379"
ETCD_INITIAL_CLUSTER="master=http://192.168.0.108:2380,slave1=http://192.168.0.109:2380,slave2=http://192.168.0.110:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster001"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
mkdir -p /var/lib/etcd-jingtai/data
systemctl daemon-reload
systemctl enable etcd
Configure etcd on node2
#quote the delimiter so the shell does not expand the ${ETCD_*} variables at write time
cat << 'EOF' > /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=root
ExecStart=/usr/bin/etcd  \
    --name=${ETCD_NAME} \
    --data-dir=${ETCD_DATA_DIR} \
    --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS} \
    --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
    --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
    --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
    --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
    --initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE} \
    --initial-cluster=${ETCD_INITIAL_CLUSTER}
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
cat << EOF > /etc/etcd/etcd.conf
#[Member]
ETCD_DATA_DIR="/var/lib/etcd-jingtai/data"
ETCD_LISTEN_PEER_URLS="http://192.168.0.110:2380"
ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.0.110:2379"
ETCD_NAME="slave2"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.0.110:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.0.110:2379"
ETCD_INITIAL_CLUSTER="master=http://192.168.0.108:2380,slave1=http://192.168.0.109:2380,slave2=http://192.168.0.110:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster001"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
mkdir -p /var/lib/etcd-jingtai/data
systemctl daemon-reload
systemctl enable etcd
Start etcd on master1, node1, and node2
systemctl start etcd #run on all three nodes at roughly the same time; the cluster only forms once a quorum of members is up
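Once all three members are running, the cluster state can be verified from any node with the v2 etcdctl client that ships with the etcd package:
etcdctl cluster-health
etcdctl member list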
Configure the API server on master1
cp /etc/kubernetes/config  /etc/kubernetes/config.bak
cat << EOF > /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=true"
KUBE_MASTER="--master=http://192.168.0.108:8080"
EOF
cp /etc/kubernetes/apiserver /etc/kubernetes/apiserver.bak
cat << EOF > /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--address=0.0.0.0"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.0.108:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
EOF
systemctl start kube-apiserver
systemctl start kube-controller-manager
systemctl start kube-scheduler
systemctl enable kube-apiserver
systemctl enable kube-controller-manager
systemctl enable kube-scheduler
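With all three services running, the control plane can be sanity-checked from master1 (with no kubeconfig, kubectl defaults to the local insecure port 8080 configured above):
kubectl get componentstatuses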
Configure the kubelet on node1
cp /etc/kubernetes/config  /etc/kubernetes/config.bak
cat << EOF > /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.0.108:8080"
EOF
cp /etc/kubernetes/kubelet /etc/kubernetes/kubelet.bak
cat << EOF > /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=192.168.0.109"
KUBELET_HOSTNAME="--hostname-override=node1"
KUBELET_API_SERVER="--api-servers=http://192.168.0.108:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
EOF
Configure the kubelet on node2
cp /etc/kubernetes/config  /etc/kubernetes/config.bak
cat << EOF > /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.0.108:8080"
EOF
cp /etc/kubernetes/kubelet /etc/kubernetes/kubelet.bak
cat << EOF > /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=192.168.0.110"
KUBELET_HOSTNAME="--hostname-override=node2"
KUBELET_API_SERVER="--api-servers=http://192.168.0.108:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
EOF
Start kubelet and kube-proxy on node1 and node2
systemctl start kubelet
systemctl start kube-proxy
systemctl enable kubelet
systemctl enable kube-proxy
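If both services start cleanly, the nodes register themselves with the API server; a quick local check on each node:
systemctl status kubelet kube-proxy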
Configure flanneld on master1, node1, and node2

#master1

cat << EOF > /etc/sysconfig/flanneld
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_ETCD="http://192.168.0.108:2379"
EOF

#node1

cat << EOF > /etc/sysconfig/flanneld
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_ETCD="http://192.168.0.109:2379"
EOF

#node2

cat << EOF > /etc/sysconfig/flanneld
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_ETCD="http://192.168.0.110:2379"
EOF
#run once (on master1) to publish the overlay network range in etcd
etcdctl mk /coreos.com/network/config '{"Network": "10.1.0.0/16"}'
#on every node: delete the old docker0 bridge so flannel can hand Docker a fresh subnet
ip link del docker0
systemctl start flanneld
systemctl enable flanneld
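flanneld writes the subnet it leased to /run/flannel/subnet.env; checking that file on each node confirms the overlay is up (with the default udp backend the tunnel interface is flannel0):
cat /run/flannel/subnet.env
ip addr show flannel0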
Update the docker service on master1, node1, and node2
vi /usr/lib/systemd/system/docker.service
#add the following lines to the [Service] section
EnvironmentFile=-/etc/sysconfig/flanneld
EnvironmentFile=-/run/flannel/subnet.env
#append the following option to the ExecStart line
          --bip=${FLANNEL_SUBNET} \
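For reference, the [Service] section should end up looking roughly like this; the ExecStart binary and the rest of its options depend on the installed Docker package, so only the two EnvironmentFile lines and the --bip option are additions:
[Service]
EnvironmentFile=-/etc/sysconfig/flanneld
EnvironmentFile=-/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd-current \
          --bip=${FLANNEL_SUBNET} \
          ...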

Restart Docker
systemctl daemon-reload
systemctl restart docker
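After the restart, docker0 should come back up with an address inside the flannel subnet leased to that node; compare it against FLANNEL_SUBNET from /run/flannel/subnet.env:
ip addr show docker0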
Validate with kubectl
kubectl get nodes
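node1 and node2 should both report Ready. As a final smoke test, schedule a pod across the cluster (nginx is just an arbitrary public image; any image reachable through the configured mirror will do):
kubectl run nginx --image=nginx --replicas=2
kubectl get pods -o wide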

References

Kubernetes: building a k8s cluster on virtual machines: https://blog.csdn.net/qq_38252499/article/details/99214276
Deploying an etcd cluster statically: https://www.jianshu.com/p/5ea027315285
