ubuntu16.04 k8s 二进制包安装

1.安装环境

Ubuntu 16.04 AIO
关闭SWAP:
$ sudo swapoff -a
$ vi /etc/fstab 注释掉带有 swap 的挂载行
$ free -h 查看 
IP:172.29.236.111

Etcd v3.2.18
链接描述
https://github.com/coreos/etcd/releases/download/v3.2.18/etcd-v3.2.18-linux-amd64.tar.gz
Kubernetes-server v1.10.0 
链接描述
https://dl.k8s.io/v1.10.0/kubernetes-server-linux-amd64.tar.gz
Flannel v0.10.0
链接描述

https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
Dashboard v1.8.3
链接描述
https://github.com/kubernetes/dashboard/archive/v1.8.3.tar.gz

2.安装docker(所有节点)


$ apt-get remove docker docker-engine docker.io


$ add-apt-repository  "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"


$ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -


$ apt-get install -y docker-ce
docker容器仓库加速


$ vim /etc/docker/daemon.json


{
    "registry-mirrors":["https://registry.docker-cn.com"]
}
3.系统设置(所有节点)


$ ufw disable
写入配置文件


$ cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
生效配置文件


$ sysctl -p /etc/sysctl.d/k8s.conf
4.部署ETCD(主节点)


$ tar zxvf etcd-v3.2.18-linux-amd64.tar.gz

$ tar zxvf kubernetes-server-linux-amd64.tar.gz

$ tar zxvf flannel-v0.10.0-linux-amd64.tar.gz

$ tar zxvf v1.8.3.tar.gz    # 下载得到的文件名为 v1.8.3.tar.gz,解压后目录为 dashboard-1.8.3

$ cp etcd-v3.2.18-linux-amd64/{etcd,etcdctl} /usr/bin/
$ sudo vim /lib/systemd/system/etcd.service


[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos


[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name=default \
  --listen-client-urls=http://172.29.236.111:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=http://172.29.236.111:2379 \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536


[Install]
WantedBy=multi-user.target
$ sudo mkdir /var/lib/etcd/
启动服务


$ sudo systemctl daemon-reload 
$ sudo systemctl enable etcd
$ sudo systemctl start etcd
创建一个etcd网络


$ etcdctl set /coreos.com/network/config '{ "Network": "192.168.4.0/24" }'
部署APIServer(主节点)


$ cp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler} /usr/bin/
$ sudo vim /lib/systemd/system/kube-apiserver.service


[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
ExecStart=/usr/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,DefaultStorageClass,ResourceQuota,NodeRestriction \
  --insecure-bind-address=0.0.0.0 \
  --kubelet-https=false \
  --service-cluster-ip-range=10.68.0.0/16 \
  --service-node-port-range=20000-40000 \
  --etcd-servers=http://172.29.236.111:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/lib/audit.log \
  --event-ttl=1h \
  --v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
$ sudo systemctl daemon-reload
$ sudo systemctl enable kube-apiserver
$ sudo systemctl start kube-apiserver
部署ControllerManager(主节点)


$ sudo vim /lib/systemd/system/kube-controller-manager.service


[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/usr/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=10.68.0.0/16 \
  --cluster-cidr=172.20.0.0/16 \
  --cluster-name=kubernetes \
  --leader-elect=true \
  --cluster-signing-cert-file= \
  --cluster-signing-key-file= \
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
$ sudo systemctl daemon-reload
$ sudo systemctl enable kube-controller-manager
$ sudo systemctl start kube-controller-manager
部署Scheduler(主节点)


$ sudo vim /lib/systemd/system/kube-scheduler.service


[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes


[Service]
ExecStart=/usr/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --leader-elect=true \
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target


$ sudo systemctl daemon-reload
$ sudo systemctl enable kube-scheduler
$ sudo systemctl start kube-scheduler
$ cp flanneld /usr/bin/
$ sudo mkdir -p /usr/bin/flannel/
$ cp mk-docker-opts.sh /usr/bin/flannel/
部署flannel(所有节点)


$ sudo vim /lib/systemd/system/flanneld.service


[Unit]
Description=Flanneld
Documentation=https://github.com/coreos/flannel
After=network.target
After=etcd.service
Before=docker.service


[Service]
User=root
EnvironmentFile=/etc/default/flanneld.conf
ExecStart=/usr/bin/flanneld \
  -etcd-endpoints=${FLANNEL_ETCD_ENDPOINTS} \
  -etcd-prefix=${FLANNEL_ETCD_PREFIX} \
  $FLANNEL_OPTIONS
ExecStartPost=/usr/bin/flannel/mk-docker-opts.sh -k DOCKER_OPTS -d /run/flannel/docker
Restart=on-failure
Type=notify
LimitNOFILE=65536


[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
$ sudo vim /etc/default/flanneld.conf
# Flanneld configuration options


# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://172.29.236.111:2379"


# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/coreos.com/network"


# Any additional options that you want to pass
#FLANNEL_OPTIONS=""


$ sudo systemctl daemon-reload 
$ sudo systemctl enable flanneld
$ sudo systemctl start flanneld
5.修改docker-ce支持flannel网络
修改docker的systemd配置文件


$ vim /lib/systemd/system/docker.service
[Service]
Type=notify
EnvironmentFile=/run/flannel/docker
ExecStart=/usr/bin/dockerd -H fd:// $DOCKER_OPTS
重启docker服务。


$ sudo systemctl daemon-reload

$ sudo systemctl restart docker

查看docker是否有了flannel的网络。


$ sudo ps -ef | grep docker
root     11285     1  1 15:14 ?        00:00:01 /usr/bin/dockerd -H fd:// --bip=192.168.4.129/26 --ip-masq=true --mtu=1472
...

6.配置kubelet(工作节点)


$ cp kubernetes/server/bin/{kubectl,kubelet,kube-proxy} /usr/bin/
$ sudo vim /lib/systemd/system/kubelet.service


[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service


[Service]
User=root
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/bin/kubelet \
  --address=172.29.236.111 \
  --hostname-override=172.29.236.111 \
  --pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --allow-privileged=false \
  --fail-swap-on=false \
  --logtostderr=true \
  --v=2
Restart=on-failure
RestartSec=5


[Install]

WantedBy=multi-user.target


$ sudo vim /etc/kubernetes/kubelet.kubeconfig
apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: http://172.29.236.111:8080
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: ""
  name: system:node:kube-master
current-context: system:node:kube-master
kind: Config
preferences: {}

users: []

$ sudo mkdir /var/lib/kubelet

$ sudo systemctl daemon-reload
$ sudo systemctl enable kubelet
$ sudo systemctl start kubelet
7.配置kube-proxy服务


$ sudo vim /lib/systemd/system/kube-proxy.service


[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/bin/kube-proxy \
  --bind-address=172.29.236.111 \
  --hostname-override=172.29.236.111 \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
  --logtostderr=true \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536


[Install]
WantedBy=multi-user.target


$ sudo vim /etc/kubernetes/kube-proxy.kubeconfig


apiVersion: v1
clusters:
- cluster:
    server: http://172.29.236.111:8080
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
  name: default
current-context: default
kind: Config
preferences: {}
users: []
$ sudo mkdir /var/lib/kube-proxy
$ sudo systemctl daemon-reload
$ sudo systemctl enable kube-proxy
$ sudo systemctl start kube-proxy
8.查询node状态 
  执行kubectl get node命令来查看node状态。都为Ready状态时,则说明node节点已经成功连接到master,如果不是该状态,则需要到该节点上,定位下原因。可通过journalctl -u kubelet.service命令来查看kubelet服务的日志。


$ kubectl get node
9.Kubernetes测试  
  测试Kubernetes是否成功安装。
编写yaml文件
  在Kubernetes master上创建一个nginx.yaml,用于创建一个nginx的ReplicationController。


$ vim rc_nginx.yaml


apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx
  labels:
    name: nginx
spec:
  replicas: 2
  selector:
    name: nginx
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
创建pod
  执行kubectl create命令创建ReplicationController。该ReplicationController配置中有两个副本,并且我们的环境有两个Kubernetes Node,因此,它应该会在两个Node上分别运行一个Pod。
  注意:这个过程可能会需要很长的时间,它会从网上拉取nginx镜像,还有pod-infrastructure这个关键镜像。


$ kubectl create -f rc_nginx.yaml
10.查询状态 
  执行kubectl get pod和rc命令来查看pod和rc状态。刚开始可能会处于containerCreating的状态,待需要的镜像下载完成后,就会创建具体的容器。pod状态应该显示Running状态。


nginx.yaml

apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
     app: nginx    
spec:
     containers:
        - name: nginx
          image: nginx
          imagePullPolicy: IfNotPresent
          ports:
          - containerPort: 80
     restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: NodePort
  sessionAffinity: ClientIP
  selector:
    app: nginx
  ports:
    - port: 80
      nodePort: 30080


$ kubectl get rc

$ kubectl get pod -o wide

11.部署Dashboard



原文链接:https://blog.csdn.net/greatyoulv/article/details/80032794

转载请注明出处

联系邮箱:youlv@foxmail.com
