1. Software and versions used in this environment

Because of limited memory on my machine, only two VMs can run at once: one hosts both the master and a node, the other is a second node.

linux: CentOS 7.1 (minimal install)

docker: 1.12.3

k8s: 1.5.2 (installed via yum)

Install the VMs with the minimal profile and use network (NTP) time so the two machines' clocks stay in sync.

master -- 192.168.1.5

node -- 192.168.1.6

Configure the network and firewall on both machines; the network must be able to reach the internet.

vi /etc/sysconfig/network-scripts/ifcfg-eno16777736

TYPE=Ethernet

BOOTPROTO=static

DEFROUTE=yes

PEERDNS=no

PEERROUTES=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_PEERDNS=yes

IPV6_PEERROUTES=yes

IPV6_FAILURE_FATAL=no

NAME=eno16777736

UUID=71c2c872-0682-4d95-a1dd-ec478dbd9385

DEVICE=eno16777736

ONBOOT=yes

IPADDR=192.168.1.5    # change to 192.168.1.6 on the node machine

NETMASK=255.255.255.0

GATEWAY=192.168.1.1

DNS1=8.8.8.8

DNS2=114.114.114.114

After configuring, restart the network:

systemctl restart network.service
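
A quick sanity check (optional) that the static IP and DNS took effect:

ip addr show eno16777736    # should show 192.168.1.5 (or .6 on the node)

ping -c 3 mirrors.aliyun.com    # verifies the default route and DNS resolution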


Next, disable SELinux and the firewall:

setenforce 0

sed -i 's/enforcing/disabled/g' /etc/selinux/config

systemctl stop firewalld

systemctl disable firewalld


Set the hostnames (one command on the master, the other on the node):

hostnamectl --static set-hostname  k8s-master
hostnamectl --static set-hostname  k8s-node-1
Then add both entries to /etc/hosts on each machine:

192.168.1.5  k8s-master

192.168.1.6  k8s-node-1
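
A convenient way to append both entries on each machine (a sketch; skip any lines /etc/hosts already has):

cat >> /etc/hosts <<'EOF'
192.168.1.5  k8s-master
192.168.1.6  k8s-node-1
EOF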


Configure the yum repos; here we use Aliyun's mirrors:

cd /etc/yum.repos.d/

wget http://mirrors.aliyun.com/repo/Centos-7.repo

wget http://mirrors.aliyun.com/repo/epel-7.repo

yum -y install epel-release
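
Optionally refresh the yum cache so the new repos are picked up:

yum clean all && yum makecache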


2. Deploying the k8s environment

Kubernetes components:
  - etcd: a highly available key/value store and service-discovery system
  - flannel: provides cross-host container networking
  - kube-apiserver: exposes the Kubernetes cluster API
  - kube-controller-manager: runs the controllers that keep the cluster in its desired state
  - kube-scheduler: schedules containers and assigns them to nodes
  - kubelet: starts containers on a node according to the pod specs it is given
  - kube-proxy: provides the network proxy for services

The master runs etcd, kube-apiserver, kube-scheduler, kube-controller-manager, flannel, and docker.

Each node runs kube-proxy, kubelet, docker, and flannel.

Because we have so few machines, the node components are also deployed on the master so that it doubles as a node.


2.1 Installing the master

Install etcd, kubernetes-master, docker, and flannel with yum:


yum -y install etcd kubernetes-master


yum -y install docker  flannel


flannel must also be installed on the master; it is required for the master to communicate with the pod network on the nodes.

After installation, edit the configuration files.

2.1.1 Edit /etc/etcd/etcd.conf

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"

ETCD_NAME="master"

ETCD_ADVERTISE_CLIENT_URLS="http://192.168.1.5:2379"


2.1.2 Edit /etc/kubernetes/apiserver


KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

KUBE_API_PORT="--port=8080"

KUBELET_PORT="--kubelet-port=10250"

KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.1.5:2379"

KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"

KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota" # ServiceAccount has been removed from the default list

KUBE_API_ARGS=""


2.1.3 Start etcd, kube-apiserver, kube-controller-manager, and kube-scheduler, and enable them at boot


for SERVICES in etcd kube-apiserver kube-controller-manager kube-scheduler; do systemctl restart $SERVICES;systemctl enable $SERVICES;systemctl status $SERVICES ; done
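
To verify the master is healthy, a quick check such as the following helps (addresses per the config above):

etcdctl cluster-health    # etcd should report "cluster is healthy"

curl http://192.168.1.5:8080/version    # the apiserver should return its version as JSON

kubectl get componentstatuses    # scheduler, controller-manager, and etcd should all be Healthy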


2.1.4 Define the flannel network in etcd


etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'

# the key prefix "atomic.io" is used again later in the flanneld (FLANNEL_ETCD_KEY) and kubelet (--cluster-domain) settings
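
Verify the key was written:

etcdctl get /atomic.io/network/config    # should print {"Network":"172.17.0.0/16"}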


2.2 Installing and configuring the node components

Perform the following steps on every node (including the master, since it also acts as a node).

Install flannel and kubernetes-node with yum:


yum -y install flannel kubernetes-node


2.2.1 Point flannel at the etcd service by editing /etc/sysconfig/flanneld


FLANNEL_ETCD="http://192.168.1.5:2379"

FLANNEL_ETCD_KEY="/atomic.io/network"
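
Once flanneld is running (section 2.2.5), it records the subnet it leased in /run/flannel/subnet.env; checking that file and the flannel0 interface is a useful sanity check (a sketch, path per the stock CentOS flannel package):

cat /run/flannel/subnet.env    # FLANNEL_SUBNET should be a /24 inside 172.17.0.0/16

ip addr show flannel0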


2.2.2 Edit /etc/kubernetes/config



KUBE_LOGTOSTDERR="--logtostderr=true"

KUBE_LOG_LEVEL="--v=0"

KUBE_ALLOW_PRIV="--allow-privileged=true"

KUBE_MASTER="--master=http://192.168.1.5:8080"


2.2.3 Edit each node's /etc/kubernetes/kubelet



KUBELET_ADDRESS="--address=0.0.0.0"

KUBELET_PORT="--port=10250"

KUBELET_HOSTNAME="--hostname-override=192.168.1.6" # change to the node's own IP

KUBELET_API_SERVER="--api-servers=http://192.168.1.5:8080" # the master's API server

KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=192.168.1.5:5000/pod-infrastructure:latest" # use the pod-infrastructure image from our private registry

KUBELET_ARGS="--cluster-dns=10.254.254.254 --cluster-domain=atomic.io"  # cluster DNS address and domain, matching the network defined earlier


2.2.4 Adjust the docker configuration

vi /etc/sysconfig/docker

Comment out the following lines:

#OPTIONS='--selinux-enabled --log-driver=journald --signature-verification=false'

#if [ -z "${DOCKER_CERT_PATH}" ]; then

#    DOCKER_CERT_PATH=/etc/docker

#fi

Add the following option (to allow pushing to and pulling from the insecure private registry):

OPTIONS='--insecure-registry=192.168.1.5:5000'
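
After docker is restarted (next section), you can confirm the setting took effect; a quick check, assuming docker 1.12's info output format:

docker info | grep -A1 'Insecure Registries'    # should list 192.168.1.5:5000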


2.2.5 On every node, start kube-proxy, kubelet, docker, and flanneld, and enable them at boot

IP forwarding must be enabled on all machines so that pods and containers on different hosts can reach each other.

To enable it immediately (does not survive a reboot):

echo "1" > /proc/sys/net/ipv4/ip_forward

To make it permanent, add the following to /etc/sysctl.conf:

net.ipv4.ip_forward = 1

Then run sysctl -p to apply it at once.
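
To confirm the current value:

sysctl net.ipv4.ip_forward    # should print net.ipv4.ip_forward = 1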


flanneld must be started before docker so that docker0 and flannel0 end up on the same subnet:

for SERVICES in kube-proxy kubelet flanneld docker ;do systemctl restart $SERVICES;systemctl enable $SERVICES;systemctl status $SERVICES; done


Check the node status:

[root@k8s-master ~]# kubectl get nodes

NAME          STATUS    AGE

192.168.1.5   Ready     2d

k8s-node-1    Ready     4d


3. Setting up a private image registry


docker pull registry

docker run -d -p 5000:5000 -v /home/registry:/tmp/registry registry   # note: if the pulled image is registry:2, its data lives in /var/lib/registry, so mount /home/registry there instead


pause-amd64 is also an essential image; application pods can use it as their infra container.

pod-infrastructure is a required image: for every pod that starts, an instance of it is started alongside it.


docker pull docker.io/tianyebj/pod-infrastructure

docker tag docker.io/tianyebj/pod-infrastructure 192.168.1.5:5000/pod-infrastructure:latest

docker push 192.168.1.5:5000/pod-infrastructure
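
If the registry image is v2, its catalog API gives a quick way to confirm the push landed (v1 registries expose a different API, so treat this as an assumption):

curl http://192.168.1.5:5000/v2/_catalog    # should list "pod-infrastructure"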


docker pull docker.io/ist0ne/kubernetes-dashboard-amd64

docker tag docker.io/ist0ne/kubernetes-dashboard-amd64 192.168.1.5:5000/kubernetes-dashboard

docker push 192.168.1.5:5000/kubernetes-dashboard


docker pull docker.io/ist0ne/kubernetes-dashboard-amd64:v1.5.1

docker tag docker.io/ist0ne/kubernetes-dashboard-amd64:v1.5.1 192.168.1.5:5000/kubernetes-dashboard-amd64:v1.5.1

docker push 192.168.1.5:5000/kubernetes-dashboard-amd64:v1.5.1


The three images used by heapster:

docker pull wanghkkk/heapster-influxdb-amd64-v1.3.3:v1.3.3

docker pull wanghkkk/heapster-amd64-v1.4.0:v1.4.0

docker pull wanghkkk/heapster-grafana-amd64-v4.4.3:v4.4.3

docker tag docker.io/wanghkkk/heapster-influxdb-amd64-v1.3.3:v1.3.3 192.168.1.5:5000/heapster-influxdb-amd64-v1.3.3:v1.3.3

docker tag docker.io/wanghkkk/heapster-grafana-amd64-v4.4.3:v4.4.3 192.168.1.5:5000/heapster-grafana-amd64-v4.4.3:v4.4.3

docker tag docker.io/wanghkkk/heapster-amd64-v1.4.0:v1.4.0 192.168.1.5:5000/heapster-amd64-v1.4.0:v1.4.0

docker push 192.168.1.5:5000/heapster-influxdb-amd64-v1.3.3:v1.3.3

docker push 192.168.1.5:5000/heapster-grafana-amd64-v4.4.3:v4.4.3

docker push 192.168.1.5:5000/heapster-amd64-v1.4.0:v1.4.0


4. Creating the dashboard service

cd /home/

Create the yaml file and adjust two settings in it: apiserver-host=http://192.168.1.5:8080 and image: 192.168.1.5:5000/kubernetes-dashboard:latest

vi kubernetes-dashboard.yaml

kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  labels:
    app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kubernetes-dashboard
  template:
    metadata:
      labels:
        app: kubernetes-dashboard
      # Comment the following annotation if Dashboard must not be deployed on master
      annotations:
        scheduler.alpha.kubernetes.io/tolerations: |
          [
            {
              "key": "dedicated",
              "operator": "Equal",
              "value": "master",
              "effect": "NoSchedule"
            }
          ]
    spec:
      containers:
      - name: kubernetes-dashboard
        image: 192.168.1.5:5000/kubernetes-dashboard:latest
        imagePullPolicy: Always
        ports:
        - containerPort: 9090
          protocol: TCP
        args:
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          - --apiserver-host=http://192.168.1.5:8080
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 50
          timeoutSeconds: 50
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 9090
  selector:
    app: kubernetes-dashboard


Use the following commands to create, delete, and inspect the application:

create:      kubectl create -f kubernetes-dashboard.yaml

delete:      kubectl delete -f kubernetes-dashboard.yaml

list pods:   kubectl get pods -n kube-system -o wide

[root@k8s-master home]# kubectl get pods -n kube-system -o wide

NAME                                    READY     STATUS    RESTARTS   AGE       IP            NODE

kubernetes-dashboard-3155532917-jgmfh   1/1       Running   0          8h        172.17.29.2   k8s-node-1


Inspect the dashboard pod's creation events; this is often useful for troubleshooting:

kubectl describe pod kubernetes-dashboard-3155532917-jgmfh  -n kube-system


List the service:    kubectl get svc -n kube-system -o wide

[root@k8s-master home]# kubectl get svc -n kube-system -o wide

NAME                   CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE       SELECTOR

kubernetes-dashboard   10.254.48.98     <nodes>       80:32482/TCP   8h        app=kubernetes-dashboard


Open the dashboard page:

http://192.168.1.5:8080/ui/
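
Since the service type is NodePort, the dashboard is also reachable directly on any node at the mapped port from the svc output above, e.g. http://192.168.1.5:32482/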



5. Creating heapster

cd /home/heapster

Create the three yaml files below; in each of them, point the image at the private registry. heapster.yaml additionally needs its command arguments changed:


command:
        - /heapster
        - --source=kubernetes:http://192.168.1.5:8080?inClusterConfig=false
        - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086

Here 192.168.1.5:8080 is the apiserver address, and inClusterConfig=false tells heapster not to use the in-cluster kube config from its service account.


vi grafana.yaml

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      containers:
      - name: grafana
        image: 192.168.1.5:5000/heapster-grafana-amd64-v4.4.3:v4.4.3
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ca-certificates
          readOnly: true
        - mountPath: /var
          name: grafana-storage
        env:
        - name: INFLUXDB_HOST
          value: monitoring-influxdb
        - name: GF_SERVER_HTTP_PORT
          value: "3000"
          # The following env variables are required to make Grafana accessible via
          # the kubernetes api-server proxy. On production clusters, we recommend
          # removing these env variables, setup auth for grafana, and expose the grafana
          # service using a LoadBalancer or a public IP.
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          # If you're only using the API Server proxy, set this value instead:
          # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
          value: /
      volumes:
      - name: ca-certificates
        hostPath:
          path: /etc/ssl/certs
      - name: grafana-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  name: monitoring-grafana
  namespace: kube-system
spec:
  # In a production setup, we recommend accessing Grafana through an external Loadbalancer
  # or through a public IP.
  # type: LoadBalancer
  # You could also use NodePort to expose the service at a randomly-generated port
  # type: NodePort
  ports:
  - port: 80
    targetPort: 3000
  selector:
    k8s-app: grafana

vi influxdb.yaml

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-influxdb
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: influxdb
    spec:
      containers:
      - name: influxdb
        image: 192.168.1.5:5000/heapster-influxdb-amd64-v1.3.3:v1.3.3
        volumeMounts:
        - mountPath: /data
          name: influxdb-storage
      volumes:
      - name: influxdb-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-influxdb
  name: monitoring-influxdb
  namespace: kube-system
spec:
  ports:
  - port: 8086
    targetPort: 8086
  selector:
    k8s-app: influxdb

vi heapster.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      containers:
      - name: heapster
        image: 192.168.1.5:5000/heapster-amd64-v1.4.0:v1.4.0
        imagePullPolicy: IfNotPresent
        command:
        - /heapster
        - --source=kubernetes:http://192.168.1.5:8080?inClusterConfig=false
        - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster


Once the files are created and edited, run the create command:

cd /home/heapster/

kubectl create -f .

kubectl get pod -n kube-system -o wide

[root@k8s-master influxdb]# kubectl get pod -n kube-system -o wide

NAME                                    READY     STATUS    RESTARTS   AGE       IP            NODE

heapster-3919175978-5w2d5               1/1       Running   0          6h        172.17.52.3   192.168.1.5

kubernetes-dashboard-3155532917-jgmfh   1/1       Running   0          9h        172.17.29.2   k8s-node-1

monitoring-grafana-3994812335-1dz78     1/1       Running   0          6h        172.17.52.4   192.168.1.5

monitoring-influxdb-265709471-6l3j3     1/1       Running   0          6h        172.17.29.3   k8s-node-1
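
If the pods are Running but no metrics appear, the heapster log is the first place to look (pod name taken from the listing above):

kubectl logs heapster-3919175978-5w2d5 -n kube-system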


View the result in the dashboard UI.

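
Grafana can also be opened through the apiserver proxy, as hinted by the GF_SERVER_ROOT_URL comment in grafana.yaml; the standard proxy path (an assumption, not verified here) would be:

http://192.168.1.5:8080/api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/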