28-1.kubernetes

3.5 deployment

3.5.1演示rc的痛点

deployment也是保证pod高可用的一种方式,明明已经有RC,为什么还要引入deployment呢?
因为deployment解决了RC的一个痛点
演示RC的一个痛点----使用rolling-update升级pod后,pod的标签变了,但service关联的标签(selector)不会自动更新,网络需要手动配置

#1.上面操作的环境如下
[root@k8s-master ~]# kubectl get all
NAME       DESIRED   CURRENT   READY     AGE
rc/myweb   3         3         3         2h

NAME             CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE
svc/kubernetes   10.254.0.1    <none>        443/TCP        7h
svc/myweb        10.254.6.89   <nodes>       80:10000/TCP   2h

NAME             READY     STATUS    RESTARTS   AGE
po/myweb-p40zr   1/1       Running   0          2h
po/myweb-q5wrj   1/1       Running   0          2h
po/myweb-zrnb4   1/1       Running   0          2h
[root@k8s-master ~]# kubectl get svc
NAME         CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE
kubernetes   10.254.0.1    <none>        443/TCP        7h
myweb        10.254.6.89   <nodes>       80:10000/TCP   2h
[root@k8s-master ~]# curl -I 10.0.0.11:10000
HTTP/1.1 200 OK
Server: nginx/1.13.12
Date: Wed, 28 Aug 2019 14:12:41 GMT
Content-Type: text/html
Content-Length: 5
Last-Modified: Wed, 28 Aug 2019 12:25:23 GMT
Connection: keep-alive
ETag: "5d6672b3-5"
Accept-Ranges: bytes
#2.升级
[root@k8s-master rc]# kubectl rolling-update myweb -f nginx-rc2.yaml --update-period=1s
Created myweb2
Scaling up myweb2 from 0 to 2, scaling down myweb from 3 to 0 (keep 2 pods available, don't exceed 3 pods)
Scaling myweb down to 2
Scaling myweb2 up to 1
Scaling myweb down to 1
Scaling myweb2 up to 2
Scaling myweb down to 0
Update succeeded. Deleting myweb
replicationcontroller "myweb" rolling updated to "myweb2"
[root@k8s-master rc]# 
## 2.1不能连接到容器中的服务了
[root@k8s-master rc]# curl -I 10.0.0.11:10000
^C
[root@k8s-master rc]#
## 2.2原因是关联的标签还是myweb
svc/myweb        10.254.6.89   <nodes>       80:10000/TCP   2h        app=myweb
[root@k8s-master rc]# kubectl get all -o wide
NAME        DESIRED   CURRENT   READY     AGE       CONTAINER(S)   IMAGE(S)                    SELECTOR
rc/myweb2   2         2         2         2m        myweb2         10.0.0.11:5000/nginx:1.15   app=myweb2

NAME             CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE       SELECTOR
svc/kubernetes   10.254.0.1    <none>        443/TCP        8h        <none>
svc/myweb        10.254.6.89   <nodes>       80:10000/TCP   2h        app=myweb

NAME              READY     STATUS    RESTARTS   AGE       IP            NODE
po/myweb2-cmv42   1/1       Running   0          2m        172.16.48.3   k8s-node2
po/myweb2-wbj5j   1/1       Running   0          2m        172.16.93.2   k8s-master
[root@k8s-master rc]# 
##2.3此时svc myweb下没有节点
[root@k8s-master rc]# kubectl describe svc myweb 
Name:           myweb
Namespace:      default
Labels:         <none>
Selector:       app=myweb
Type:           NodePort
IP:         10.254.6.89
Port:           <unset> 80/TCP
NodePort:       <unset> 10000/TCP
Endpoints:      <none>
Session Affinity:   None
No events.
##2.4修改关联的标签
  selector:
    app: myweb2
##2.5再查看svc myweb下有关联的节点了
[root@k8s-master rc]# kubectl describe svc myweb 
Name:           myweb
Namespace:      default
Labels:         <none>
Selector:       app=myweb2
Type:           NodePort
IP:         10.254.6.89
Port:           <unset> 80/TCP
NodePort:       <unset> 10000/TCP
Endpoints:      172.16.48.3:80,172.16.93.2:80
Session Affinity:   None
No events.
[root@k8s-master rc]# 
[root@k8s-master rc]# kubectl edit svc myweb

3.5.2创建deployment是主流,rc不怎么使用了

#1.创建deploy
##1.1删除之前创建的pod和service
[root@k8s-master deploy]# kubectl delete rc myweb2
replicationcontroller "myweb2" deleted
[root@k8s-master deploy]# kubectl delete svc myweb
service "myweb" deleted

##1.2创建deploy
vi  nginx-deploy.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: 10.0.0.11:5000/nginx:1.13
        ports:
        - containerPort: 80
[root@k8s-master deploy]# kubectl create  -f  nginx-deploy.yaml
deployment "nginx-deployment" created

##1.3查看创建的资源
[root@k8s-master deploy]# kubectl get all
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           2m

##1.4关联service
[root@k8s-master deploy]# kubectl expose deployment nginx-deployment --port=80 --type=NodePort
service "nginx-deployment" exposed

##1.5查看资源的状态,创建了svc
svc/nginx-deployment   10.254.122.83   <nodes>       80:57526/TCP   6s
[root@k8s-master deploy]# kubectl get all
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           4m

NAME                   CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
svc/kubernetes         10.254.0.1      <none>        443/TCP        8h
svc/nginx-deployment   10.254.122.83   <nodes>       80:57526/TCP   6s

NAME                             DESIRED   CURRENT   READY     AGE
rs/nginx-deployment-2602221530   3         3         3         4m

NAME                                   READY     STATUS    RESTARTS   AGE
po/nginx-deployment-2602221530-032n2   1/1       Running   0          4m
po/nginx-deployment-2602221530-3fs60   1/1       Running   0          4m
po/nginx-deployment-2602221530-x57wp   1/1       Running   0          4m
[root@k8s-master deploy]# 

NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   10.254.0.1   <none>        443/TCP   8h

NAME                             DESIRED   CURRENT   READY     AGE
rs/nginx-deployment-2602221530   3         3         3         2m

NAME                                   READY     STATUS    RESTARTS   AGE
po/nginx-deployment-2602221530-032n2   1/1       Running   0          2m
po/nginx-deployment-2602221530-3fs60   1/1       Running   0          2m
po/nginx-deployment-2602221530-x57wp   1/1       Running   0          2m
[root@k8s-master deploy]# 

##1.6连接容器中的服务
[root@k8s-master deploy]# curl -I 10.0.0.11:57526
HTTP/1.1 200 OK
Server: nginx/1.13.12
Date: Wed, 28 Aug 2019 14:32:56 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Mon, 09 Apr 2018 16:01:09 GMT
Connection: keep-alive
ETag: "5acb8e45-264"
Accept-Ranges: bytes

[root@k8s-master deploy]# 

##1.7升级,修改nginx镜像为1.15
[root@k8s-master deploy]# kubectl edit deployment nginx-deployment
deployment "nginx-deployment" edited

 containers:
      - image: 10.0.0.11:5000/nginx:1.15

##1.8再次连接容器中的服务
[root@k8s-master deploy]# curl -I 10.0.0.11:57526
HTTP/1.1 200 OK
Server: nginx/1.15.12
Date: Wed, 28 Aug 2019 14:35:24 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 16 Apr 2019 13:08:19 GMT
Connection: keep-alive
ETag: "5cb5d3c3-264"
Accept-Ranges: bytes

[root@k8s-master deploy]# 

## 1.9回滚
[root@k8s-master deploy]# kubectl rollout undo deployment nginx-deployment
deployment "nginx-deployment" rolled back
[root@k8s-master deploy]# 

## 1.10连接容器中的服务
[root@k8s-master deploy]# curl -I 10.0.0.11:57526
HTTP/1.1 200 OK
Server: nginx/1.13.12
Date: Wed, 28 Aug 2019 14:36:51 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Mon, 09 Apr 2018 16:01:09 GMT
Connection: keep-alive
ETag: "5acb8e45-264"
Accept-Ranges: bytes

[root@k8s-master deploy]# 

## 1.11查看历史版本,这里CHANGE-CAUSE内容为空
[root@k8s-master deploy]# kubectl rollout history deployment nginx-deployment 
deployments "nginx-deployment"
REVISION    CHANGE-CAUSE
2       <none>
3       <none>

[root@k8s-master deploy]#

## 1.12版本发布,重点是--record这个参数
[root@k8s-master deploy]# kubectl run nginx --image=10.0.0.11:5000/nginx:1.13 --replicas=3 --record
deployment "nginx" created

## 1.13版本升级
[root@k8s-master deploy]# kubectl set image deploy nginx nginx=10.0.0.11:5000/nginx:1.15
deployment "nginx" image updated

## 1.14历史版本查询,就可查看到CHANGE-CAUSE中的内容了
[root@k8s-master deploy]# kubectl rollout history deployment nginx
deployments "nginx"
REVISION    CHANGE-CAUSE
1       kubectl run nginx --image=10.0.0.11:5000/nginx:1.13 --replicas=3 --record
2       kubectl set image deploy nginx nginx=10.0.0.11:5000/nginx:1.15

[root@k8s-master deploy]# 

##1.15回滚到指定版本
[root@k8s-master deploy]# kubectl rollout undo deployment nginx --to-revision=1
deployment "nginx" rolled back
#版本变为了2和3
[root@k8s-master deploy]# kubectl rollout history deployment nginx
deployments "nginx"
REVISION    CHANGE-CAUSE
2       kubectl set image deploy nginx nginx=10.0.0.11:5000/nginx:1.15
3       kubectl run nginx --image=10.0.0.11:5000/nginx:1.13 --replicas=3 --record

[root@k8s-master deploy]# 

3.6 tomcat-mysql实验

k8s中不同类型的pod之间如何相互访问?
例如web服务如何访问db服务?
tomcat-mysql练习
#1.准备yml文件
[root@k8s-master tomcat_demo]# cat mysql-rc.yml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql
spec:
  replicas: 1
  selector:
    app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: 10.0.0.11:5000/mysql:5.7
          ports:
          - containerPort: 3306
          env:
          - name: MYSQL_ROOT_PASSWORD
            value: '123456'
[root@k8s-master tomcat_demo]# cat mysql-svc.yml 
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  ports:
    - port: 3306
      targetPort: 3306
  selector:
    app: mysql
[root@k8s-master tomcat_demo]# cat tomcat-rc.yml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: myweb
spec:
  replicas: 1
  selector:
    app: myweb
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
        - name: myweb
          image: 10.0.0.11:5000/tomcat-app:v2
          ports:
          - containerPort: 8080
          env:
          - name: MYSQL_SERVICE_HOST
            value: 'mysql'
          - name: MYSQL_SERVICE_PORT
            value: '3306'
[root@k8s-master tomcat_demo]# cat tomcat-svc.yml 
apiVersion: v1
kind: Service
metadata:
  name: myweb
spec:
  type: NodePort
  ports:
    - port: 8080
      nodePort: 30008
  selector:
    app: myweb
[root@k8s-master tomcat_demo]# 

#2.上传镜像到本地仓库
##2.1mysql:5.7
[root@k8s-master tomcat_demo]# docker pull mysql:5.7
Trying to pull repository docker.io/library/mysql ... 
5.7: Pulling from docker.io/library/mysql
9fc222b64b0a: Pull complete 
291e388076f0: Pull complete 
d6634415290b: Pull complete 
1f1e7d852ad4: Pull complete 
125fc05f36e0: Pull complete 
2aed16e5b02f: Pull complete 
5fa9342b7235: Pull complete 
a1e72cc32505: Pull complete 
81225f74ecbe: Pull complete 
b9a45d065520: Pull complete 
a3e7b2fe9950: Pull complete 
Digest: sha256:1a121f2e7590f949b9ede7809395f209dd9910e331e8372e6682ba4bebcc020b
Status: Downloaded newer image for docker.io/mysql:5.7
#10.0.0.11:5000/mysql:5.7这个名称一定要是这个,因为在yml文件中是这样写的,写别的pull不下来
[root@k8s-master tomcat_demo]# docker tag docker.io/mysql:5.7 10.0.0.11:5000/mysql:5.7
[root@k8s-master tomcat_demo]# docker push 10.0.0.11:5000/mysql:5.7
The push refers to a repository [10.0.0.11:5000/mysql]
2ebc9c2f59ec: Mounted from docker.io/mysql 
de9c8788be4d: Mounted from docker.io/mysql 
59f1d30f4003: Mounted from docker.io/mysql 
8ea3faa6f944: Mounted from docker.io/mysql 
bb1ef34119b2: Mounted from docker.io/mysql 
65430c57aee2: Mounted from docker.io/mysql 
1dd5f3e365cf: Mounted from docker.io/mysql 
7f33ce1066af: Mounted from docker.io/mysql 
9f77b78f01a7: Mounted from docker.io/mysql 
f5741d086b76: Mounted from docker.io/mysql 
8fa655db5360: Mounted from docker.io/mysql 
5.7: digest: sha256:2bd4665d9c5ecad61f7ceff82f82e6470c4686b9ec0fd986b84012861506c722 size: 2621

##2.2tomcat
[root@k8s-master k8s]# docker image load -i tomcat-app-v2.tar.gz 
4dcab49015d4: Loading layer [==================================================>] 130.9 MB/130.9 MB
5f70bf18a086: Loading layer [==================================================>] 1.024 kB/1.024 kB
d2c5e3a8d3d3: Loading layer [==================================================>] 45.18 MB/45.18 MB
02adacdfda2f: Loading layer [==================================================>] 1.292 MB/1.292 MB
3b7a0c95e085: Loading layer [==================================================>] 3.584 kB/3.584 kB
bb3e02b5a488: Loading layer [==================================================>] 166.4 MB/166.4 MB
d490458a60cb: Loading layer [==================================================>]  2.56 kB/2.56 kB
2275023dea33: Loading layer [==================================================>]  5.12 kB/5.12 kB
d8ba5f179687: Loading layer [==================================================>] 7.308 MB/7.308 MB
7bb92eb08c02: Loading layer [==================================================>] 119.8 kB/119.8 kB
6d0267f8a9fd: Loading layer [==================================================>] 17.31 MB/17.31 MB
a072f755a133: Loading layer [==================================================>] 2.048 kB/2.048 kB
a8f34017877f: Loading layer [==================================================>] 1.001 MB/1.001 MB
Loaded image: docker.io/kubeguide/tomcat-app:v2
[root@k8s-master k8s]# 
#10.0.0.11:5000/tomcat-app:v2这个名称一定要是这个,因为在yml文件中是这样写的,写别的pull不下来
[root@k8s-master tomcat_demo]# docker tag docker.io/kubeguide/tomcat-app:v2 10.0.0.11:5000/tomcat-app:v2
[root@k8s-master tomcat_demo]# docker push 10.0.0.11:5000/tomcat-app:v2
The push refers to a repository [10.0.0.11:5000/tomcat-app]
a8f34017877f: Mounted from docker.io/kubeguide/tomcat-app 
5f70bf18a086: Mounted from docker.io/kubeguide/tomcat-app 
a072f755a133: Mounted from docker.io/kubeguide/tomcat-app 
6d0267f8a9fd: Mounted from docker.io/kubeguide/tomcat-app 
7bb92eb08c02: Mounted from docker.io/kubeguide/tomcat-app 
d8ba5f179687: Mounted from docker.io/kubeguide/tomcat-app 
2275023dea33: Mounted from docker.io/kubeguide/tomcat-app 
d490458a60cb: Mounted from docker.io/kubeguide/tomcat-app 
bb3e02b5a488: Mounted from docker.io/kubeguide/tomcat-app 
3b7a0c95e085: Mounted from docker.io/kubeguide/tomcat-app 
02adacdfda2f: Mounted from docker.io/kubeguide/tomcat-app 
d2c5e3a8d3d3: Mounted from docker.io/kubeguide/tomcat-app 
4dcab49015d4: Mounted from docker.io/kubeguide/tomcat-app 
v2: digest: sha256:dd1ecbb64640e542819303d5667107a9c162249c14d57581cd09c2a4a19095a0 size: 5719

#3.创建MySQL的rc和svc
删除前面创建的
[root@k8s-master tomcat_demo]#kubectl delete service nginx-deployment
[root@k8s-master tomcat_demo]# kubectl delete deployment nginx
[root@k8s-master tomcat_demo]# kubectl delete deployment nginx-deployment
创建新的
[root@k8s-master tomcat_demo]# kubectl create -f mysql-rc.yml 
replicationcontroller "mysql" created
[root@k8s-master tomcat_demo]# kubectl create -f mysql-svc.yml 
service "mysql" created
[root@k8s-master tomcat_demo]# 

#4.查看信息
[root@k8s-master tomcat_demo]# kubectl get all -o wide
NAME       DESIRED   CURRENT   READY     AGE       CONTAINER(S)   IMAGE(S)                   SELECTOR
rc/mysql   1         1         0         4m        mysql          10.0.0.11:5000/mysql:5.7   app=mysql

NAME             CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE       SELECTOR
svc/kubernetes   10.254.0.1      <none>        443/TCP    8h        <none>
svc/mysql        10.254.154.43   <none>        3306/TCP   4m        app=mysql

NAME             READY     STATUS             RESTARTS   AGE       IP            NODE
po/mysql-gg8ws   0/1       ImagePullBackOff   0          4m        172.16.79.4   k8s-node1
[root@k8s-master tomcat_demo]# 

#5.修改tomcat-rc.yml中的MYSQL_SERVICE_HOST,通过上面查到的mysql服务的CLUSTER-IP来连接mysql
[root@k8s-master tomcat_demo]# cat tomcat-rc.yml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: myweb
spec:
  replicas: 1
  selector:
    app: myweb
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
        - name: myweb
          image: 10.0.0.11:5000/tomcat-app:v2
          ports:
          - containerPort: 8080
          env:
          - name: MYSQL_SERVICE_HOST
            value: '10.254.154.43'
          - name: MYSQL_SERVICE_PORT
            value: '3306'
[root@k8s-master tomcat_demo]# 

#6.创建tomcat-rc和tomcat-svc
[root@k8s-master tomcat_demo]# kubectl create -f tomcat-rc.yml 
replicationcontroller "myweb" created
[root@k8s-master tomcat_demo]# kubectl create -f tomcat-svc.yml 
service "myweb" created
[root@k8s-master tomcat_demo]# 

#7.查看状态
[root@k8s-master tomcat_demo]# kubectl get all -o wide
NAME       DESIRED   CURRENT   READY     AGE       CONTAINER(S)   IMAGE(S)                       SELECTOR
rc/mysql   1         1         0         15m       mysql          10.0.0.11:5000/mysql:5.7       app=mysql
rc/myweb   1         1         1         5m        myweb          10.0.0.11:5000/tomcat-app:v2   app=myweb

NAME             CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE       SELECTOR
svc/kubernetes   10.254.0.1      <none>        443/TCP          9h        <none>
svc/mysql        10.254.154.43   <none>        3306/TCP         15m       app=mysql
svc/myweb        10.254.90.52    <nodes>       8080:30008/TCP   5m        app=myweb

NAME             READY     STATUS             RESTARTS   AGE       IP            NODE
po/mysql-gg8ws   0/1       ImagePullBackOff   0          15m       172.16.79.4   k8s-node1
po/myweb-sm1dv   1/1       Running            0          5m        172.16.93.2   k8s-master
[root@k8s-master tomcat_demo]# kubectl get all -o wide
NAME       DESIRED   CURRENT   READY     AGE       CONTAINER(S)   IMAGE(S)                       SELECTOR
rc/mysql   1         1         1         16m       mysql          10.0.0.11:5000/mysql:5.7       app=mysql
rc/myweb   1         1         1         6m        myweb          10.0.0.11:5000/tomcat-app:v2   app=myweb

NAME             CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE       SELECTOR
svc/kubernetes   10.254.0.1      <none>        443/TCP          9h        <none>
svc/mysql        10.254.154.43   <none>        3306/TCP         16m       app=mysql
svc/myweb        10.254.90.52    <nodes>       8080:30008/TCP   6m        app=myweb

NAME             READY     STATUS    RESTARTS   AGE       IP            NODE
po/mysql-gg8ws   1/1       Running   0          16m       172.16.79.4   k8s-node1
po/myweb-sm1dv   1/1       Running   0          6m        172.16.93.2   k8s-master
[root@k8s-master tomcat_demo]# 

#8.进入到mysql容器中查看数据
[root@k8s-master tomcat_demo]# kubectl exec -it mysql-gg8ws bash
root@mysql-gg8ws:/# mysql -uroot -p123456
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 5
Server version: 5.7.27 MySQL Community Server (GPL)

Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> show databases
    -> ;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| HPE_APP            |
| mysql              |
| performance_schema |
| sys                |
+--------------------+
5 rows in set (0.00 sec)

mysql> 
mysql> use HPE_APP
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
mysql> show tables;
+-------------------+
| Tables_in_HPE_APP |
+-------------------+
| T_USERS           |
+-------------------+
1 row in set (0.00 sec)

mysql> select * from T_USERS;
+----+---------------+-------+
| ID | USER_NAME     | LEVEL |
+----+---------------+-------+
|  1 | me            | 100   |
|  2 | our team      | 100   |
|  3 | HPE           | 100   |
|  4 | teacher       | 100   |
|  5 | docker        | 100   |
|  6 | google        | 100   |
|  7 | qwqwqwqwqwadd | 100   |
+----+---------------+-------+
7 rows in set (0.01 sec)

mysql> 

#9.扩大副本数为3
[root@k8s-master tomcat_demo]# kubectl scale rc myweb --replicas=3
replicationcontroller "myweb" scaled
[root@k8s-master tomcat_demo]# kubectl get all
NAME       DESIRED   CURRENT   READY     AGE
rc/mysql   1         1         1         23m
rc/myweb   3         3         1         13m

NAME             CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
svc/kubernetes   10.254.0.1      <none>        443/TCP          9h
svc/mysql        10.254.154.43   <none>        3306/TCP         23m
svc/myweb        10.254.90.52    <nodes>       8080:30008/TCP   13m

NAME             READY     STATUS              RESTARTS   AGE
po/mysql-gg8ws   1/1       Running             0          23m
po/myweb-1j45c   0/1       ContainerCreating   0          8s
po/myweb-d7twc   0/1       ContainerCreating   0          8s
po/myweb-sm1dv   1/1       Running             0          13m
[root@k8s-master tomcat_demo]# 

28-1.kubernetes
28-1.kubernetes
28-1.kubernetes
28-1.kubernetes

4.k8s弹性伸缩

4.1 安装dns附加组件并使用

Kubernetes中有一个很重要的特性,服务自发现。一旦一个service被创建,该service的service IP和service port等信息都可以被注入到pod中供它们使用。
Kubernetes主要支持两种service发现机制:环境变量和DNS。
没有dns服务的时候,kubernetes会采用环境变量的形式,一旦有很多个service,环境变量会变得很复杂,为了解决这个问题,我们使用DNS服务。
#1.进入到一个容器中,查看环境变量
[root@k8s-master tomcat_demo]# kubectl get all
NAME       DESIRED   CURRENT   READY     AGE
rc/mysql   1         1         1         30m
rc/myweb   3         3         3         19m

NAME             CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
svc/kubernetes   10.254.0.1      <none>        443/TCP          9h
svc/mysql        10.254.154.43   <none>        3306/TCP         30m
svc/myweb        10.254.90.52    <nodes>       8080:30008/TCP   19m

NAME             READY     STATUS    RESTARTS   AGE
po/mysql-gg8ws   1/1       Running   0          30m
po/myweb-1j45c   1/1       Running   0          6m
po/myweb-d7twc   1/1       Running   0          6m
po/myweb-sm1dv   1/1       Running   0          19m
[root@k8s-master tomcat_demo]# kubectl exec -it myweb-sm1dv bash
root@myweb-sm1dv:/usr/local/tomcat# 
root@myweb-sm1dv:/usr/local/tomcat# env
MYWEB_PORT_8080_TCP_PORT=8080
HOSTNAME=myweb-sm1dv
TOMCAT_VERSION=8.0.35
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT=tcp://10.254.0.1:443
TERM=xterm
CATALINA_HOME=/usr/local/tomcat
MYWEB_PORT_8080_TCP_ADDR=10.254.90.52
MYWEB_PORT_8080_TCP_PROTO=tcp
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_HOST=10.254.0.1
MYWEB_SERVICE_HOST=10.254.90.52
MYSQL_PORT_3306_TCP_PORT=3306
MYSQL_PORT_3306_TCP=tcp://10.254.154.43:3306
MYWEB_PORT_8080_TCP=tcp://10.254.90.52:8080
PATH=/usr/local/tomcat/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PWD=/usr/local/tomcat
JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64/jre
MYWEB_SERVICE_PORT=8080
LANG=C.UTF-8
JAVA_VERSION=7u101
SHLVL=1
HOME=/root
JAVA_DEBIAN_VERSION=7u101-2.6.6-1~deb8u1
TOMCAT_MAJOR=8
OPENSSL_VERSION=1.0.2h-1
KUBERNETES_PORT_443_TCP_PROTO=tcp
MYSQL_PORT_3306_TCP_PROTO=tcp
KUBERNETES_SERVICE_PORT_HTTPS=443
MYSQL_PORT_3306_TCP_ADDR=10.254.154.43
MYSQL_SERVICE_PORT=3306
TOMCAT_TGZ_URL=https://www.apache.org/dist/tomcat/tomcat-8/v8.0.35/bin/apache-tomcat-8.0.35.tar.gz
KUBERNETES_PORT_443_TCP_ADDR=10.254.0.1
MYSQL_PORT=tcp://10.254.154.43:3306
KUBERNETES_PORT_443_TCP=tcp://10.254.0.1:443
MYWEB_PORT=tcp://10.254.90.52:8080
MYSQL_SERVICE_HOST=10.254.154.43
_=/usr/bin/env
root@myweb-sm1dv:/usr/local/tomcat# 

#2.新建dns
##2.1准备yml
注意:需要把 --kube-master-url=http://10.0.0.11:8080 这一行修改为自己环境中apiserver的地址
[root@k8s-master skydns]# cat skydns-rc.yaml 
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO - At some point, we need to rename all skydns-*.yaml.* files to kubedns-*.yaml.*
# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.

# __MACHINE_GENERATED_WARNING__

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
      - name: kubedns
        image: myhub.fdccloud.com/library/kubedns-amd64:1.9
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthz-kubedns
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local.
        - --dns-port=10053
        - --config-map=kube-dns
        - --kube-master-url=http://10.0.0.11:8080
        # This should be set to v=2 only after the new image (cut from 1.5) has
        # been released, otherwise we will flood the logs.
        - --v=0
        #__PILLAR__FEDERATIONS__DOMAIN__MAP__
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
      - name: dnsmasq
        image: myhub.fdccloud.com/library/kube-dnsmasq-amd64:1.4
        livenessProbe:
          httpGet:
            path: /healthz-dnsmasq
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --cache-size=1000
        - --no-resolv
        - --server=127.0.0.1#10053
        #- --log-facility=-
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 10Mi
      - name: dnsmasq-metrics
        image: myhub.fdccloud.com/library/dnsmasq-metrics-amd64:1.0
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 10Mi
      - name: healthz
        image: myhub.fdccloud.com/library/exechealthz-amd64:1.2
        resources:
          limits:
            memory: 50Mi
          requests:
            cpu: 10m
            # Note that this container shouldn't really need 50Mi of memory. The
            # limits are set higher than expected pending investigation on #29688.
            # The extra memory was stolen from the kubedns container to keep the
            # net memory requested by the pod constant.
            memory: 50Mi
        args:
        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
        - --url=/healthz-dnsmasq
        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null
        - --url=/healthz-kubedns
        - --port=8080
        - --quiet
        ports:
        - containerPort: 8080
          protocol: TCP
      dnsPolicy: Default  # Don't use cluster DNS.
[root@k8s-master skydns]# 

[root@k8s-master skydns]# kubectl get all
NAME       DESIRED   CURRENT   READY     AGE
rc/mysql   1         1         1         43m
rc/myweb   3         3         3         33m

NAME             CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
svc/kubernetes   10.254.0.1      <none>        443/TCP          9h
svc/mysql        10.254.154.43   <none>        3306/TCP         43m
svc/myweb        10.254.90.52    <nodes>       8080:30008/TCP   33m

NAME             READY     STATUS    RESTARTS   AGE
po/mysql-gg8ws   1/1       Running   0          43m
po/myweb-1j45c   1/1       Running   0          20m
po/myweb-d7twc   1/1       Running   0          20m
po/myweb-sm1dv   1/1       Running   0          33m

#只要clusterIP: 10.254.230.254这个CLUSTER_IP没有被占用就可以
[root@k8s-master skydns]# cat skydns-svc.yaml 
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO - At some point, we need to rename all skydns-*.yaml.* files to kubedns-*.yaml.*

# Warning: This is a file generated from the base underscore template file: skydns-svc.yaml.base

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.230.254
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
[root@k8s-master skydns]# 

##2.2创建
[root@k8s-master skydns]# kubectl create -f skydns-rc.yaml 
deployment "kube-dns" created

[root@k8s-master skydns]# kubectl create -f skydns-svc.yaml 
service "kube-dns" created

##2.3查看pod,该pod在指定的namespace中
[root@k8s-master skydns]# kubectl get pod --namespace=kube-system
NAME                       READY     STATUS    RESTARTS   AGE
kube-dns-453684471-99j2v   4/4       Running   0          3m
[root@k8s-master skydns]#

##2.4查看所有资源
[root@k8s-master skydns]# kubectl get all --namespace=kube-system
NAME              DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/kube-dns   1         1         1            1           5m

NAME           CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
svc/kube-dns   10.254.230.254   <none>        53/UDP,53/TCP   3m

NAME                    DESIRED   CURRENT   READY     AGE
rs/kube-dns-453684471   1         1         1         5m

NAME                          READY     STATUS    RESTARTS   AGE
po/kube-dns-453684471-99j2v   4/4       Running   0          5m
[root@k8s-master skydns]# 

##2.5所有节点修改kubelet配置,使用dns
[root@k8s-node1 ~]# vim /etc/kubernetes/kubelet 
#添加如下内容
KUBELET_ARGS="--cluster_dns=10.254.230.254 --cluster_domain=cluster.local"
[root@k8s-node1 ~]# systemctl restart kubelet.service
[root@k8s-node1 ~]# 

##2.6测试dns
[root@k8s-master skydns]# cat test_dns_pod.yaml 
apiVersion: v1
kind: Pod
metadata:
  labels:
    name: busybox
    role: master
  name: busybox2
spec:
  containers:
  - name: busybox
    image: docker.io/busybox:latest
    imagePullPolicy: IfNotPresent
    command:
    - sleep
    - "3600"
[root@k8s-master skydns]# 
##2.7创建
[root@k8s-master skydns]# kubectl create -f test_dns_pod.yaml 
pod "busybox2" created

[root@k8s-master skydns]# kubectl get all
NAME       DESIRED   CURRENT   READY     AGE
rc/mysql   1         1         1         54m
rc/myweb   3         3         3         44m

NAME             CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
svc/kubernetes   10.254.0.1      <none>        443/TCP          9h
svc/mysql        10.254.154.43   <none>        3306/TCP         54m
svc/myweb        10.254.90.52    <nodes>       8080:30008/TCP   44m

NAME             READY     STATUS    RESTARTS   AGE
po/busybox2      1/1       Running   0          20s
po/mysql-gg8ws   1/1       Running   0          54m
po/myweb-1j45c   1/1       Running   0          31m
po/myweb-d7twc   1/1       Running   0          31m
po/myweb-sm1dv   1/1       Running   0          44m
[root@k8s-master skydns]# 

##2.8进入到新建的pod中
[root@k8s-master ~]# kubectl get svc
NAME         CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
kubernetes   10.254.0.1      <none>        443/TCP          9h
mysql        10.254.154.43   <none>        3306/TCP         56m
myweb        10.254.90.52    <nodes>       8080:30008/TCP   46m
[root@k8s-master ~]# 
#查看到nslookup解析出的Address与上面kubectl get svc输出的CLUSTER-IP相同
[root@k8s-master skydns]# kubectl exec -it busybox2 sh
/ # nslookup mysql
Server:     10.254.230.254
Address:    10.254.230.254:53

Name:   mysql.default.svc.cluster.local
Address: 10.254.154.43

/ # nslookup myweb
Server:     10.254.230.254
Address:    10.254.230.254:53

Name:   myweb.default.svc.cluster.local
Address: 10.254.90.52

##2.9tomcat mysql实验使用dns
###2.9.1批量删除
[root@k8s-master tomcat_demo]# kubectl delete -f .
replicationcontroller "mysql" deleted
service "mysql" deleted
replicationcontroller "myweb" deleted
service "myweb" deleted
[root@k8s-master tomcat_demo]# ll
total 16
-rw-r--r-- 1 root root 416 Apr  9 09:10 mysql-rc.yml
-rw-r--r-- 1 root root 145 Apr  9 10:02 mysql-svc.yml
-rw-r--r-- 1 root root 491 Aug 28 23:18 tomcat-rc.yml
-rw-r--r-- 1 root root 162 Jul 13  2017 tomcat-svc.yml
[root@k8s-master tomcat_demo]# 

###2.9.2tomcat-rc.yml中可以使用svc的名称了--- name: MYSQL_SERVICE_HOST   value: 'mysql'
[root@k8s-master tomcat_demo]# cat tomcat-rc.yml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: myweb
spec:
  replicas: 1
  selector:
    app: myweb
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
        - name: myweb
          image: 10.0.0.11:5000/tomcat-app:v2
          ports:
          - containerPort: 8080
          env:
          - name: MYSQL_SERVICE_HOST
            value: 'mysql'
          - name: MYSQL_SERVICE_PORT
            value: '3306'

###2.9.3批量创建
[root@k8s-master tomcat_demo]# kubectl create -f .
replicationcontroller "mysql" created
service "mysql" created
replicationcontroller "myweb" created
service "myweb" created
[root@k8s-master tomcat_demo]# 

###2.9.4查看状态
[root@k8s-master tomcat_demo]# kubectl get all
NAME       DESIRED   CURRENT   READY     AGE
rc/mysql   1         1         1         45s
rc/myweb   1         1         1         45s

NAME             CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
svc/kubernetes   10.254.0.1       <none>        443/TCP          9h
svc/mysql        10.254.233.103   <none>        3306/TCP         45s
svc/myweb        10.254.96.244    <nodes>       8080:30008/TCP   45s

NAME             READY     STATUS    RESTARTS   AGE
po/busybox2      1/1       Running   0          8m
po/mysql-fq3jt   1/1       Running   0          45s
po/myweb-bhl1v   1/1       Running   0          45s
[root@k8s-master tomcat_demo]#
###2.9.5访问数据库验证

28-1.kubernetes

4.2pod的健康检查

探针的种类:
livenessProbe:健康状态检查,周期性检查服务是否存活,检查结果失败,将重启容器

readinessProbe:可用性检查,周期性检查服务是否可用,不可用将从service的endpoints中移除

探针的检测方法:
exec:执行一段命令
httpGet:检测某个 http 请求的返回状态码
tcpSocket:测试某个端口是否能够连接
#1.liveness探针的exec使用
vi  nginx_pod_exec.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: exec
spec:
  containers:
    - name: nginx
      image: 10.0.0.11:5000/nginx:1.13
      ports:
        - containerPort: 80
      # Deliberately break the health file after 30s so the liveness
      # probe starts failing and the container gets restarted (this is
      # the point of the demo).
      args:
        - /bin/sh
        - -c
        - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
      livenessProbe:
        # exec probe: a non-zero exit status of the command marks the
        # container unhealthy; after repeated failures kubelet kills
        # and re-creates it.
        exec:
          command:
            - cat
            - /tmp/healthy
        initialDelaySeconds: 5  # wait 5s after container start before first probe
        periodSeconds: 5        # probe every 5s
## 1.1查看重启了两次
[root@k8s-master health]# kubectl get pod exec
NAME      READY     STATUS    RESTARTS   AGE
exec      1/1       Running   2          3m
## 1.2查看重启记录
[root@k8s-master health]# kubectl describe pod exec
Name:       exec
Namespace:  default
Node:       k8s-node1/10.0.0.12
Start Time: Thu, 29 Aug 2019 09:25:16 +0800
Labels:     <none>
Status:     Running
IP:     172.16.79.2
Controllers:    <none>
Containers:
  nginx:
    Container ID:   docker://cc92f566f8ead672353927bfac77ab11382c7dd5b5447c5e1175b9b072dd1a42
    Image:      10.0.0.11:5000/nginx:1.13
    Image ID:       docker-pullable://10.0.0.11:5000/nginx@sha256:e4f0474a75c510f40b37b6b7dc2516241ffa8bde5a442bde3d372c9519c84d90
    Port:       80/TCP
    Args:
      /bin/sh
      -c
      touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
    State:          Running
      Started:          Thu, 29 Aug 2019 09:27:46 +0800
    Last State:         Terminated
      Reason:           Error
      Exit Code:        137
      Started:          Thu, 29 Aug 2019 09:26:31 +0800
      Finished:         Thu, 29 Aug 2019 09:27:46 +0800
    Ready:          True
    Restart Count:      2
    Liveness:           exec [cat /tmp/healthy] delay=5s timeout=1s period=5s #success=1 #failure=3
    Volume Mounts:      <none>
    Environment Variables:  <none>
Conditions:
  Type      Status
  Initialized   True 
  Ready     True 
  PodScheduled  True 
No volumes.
QoS Class:  BestEffort
Tolerations:    <none>
Events:
  FirstSeen LastSeen    Count   From            SubObjectPath       Type        Reason      Message
  --------- --------    -----   ----            -------------       --------    ------      -------
  2m        2m      1   {default-scheduler }                Normal      Scheduled   Successfully assigned exec to k8s-node1
  2m        2m      1   {kubelet k8s-node1} spec.containers{nginx}  Normal      Created     Created container with docker id d5548bf14485; Security:[seccomp=unconfined]
  2m        2m      1   {kubelet k8s-node1} spec.containers{nginx}  Normal      Started     Started container with docker id d5548bf14485
  1m        1m      1   {kubelet k8s-node1} spec.containers{nginx}  Normal      Started     Started container with docker id af6fa4039e94
  1m        1m      1   {kubelet k8s-node1} spec.containers{nginx}  Normal      Killing     Killing container with docker id d5548bf14485: pod "exec_default(db31ab6c-c9fb-11e9-90d0-000c29e15b21)" container "nginx" is unhealthy, it will be killed and re-created.
  1m        1m      1   {kubelet k8s-node1} spec.containers{nginx}  Normal      Created     Created container with docker id af6fa4039e94; Security:[seccomp=unconfined]
  2m        57s     6   {kubelet k8s-node1} spec.containers{nginx}  Warning     Unhealthy   Liveness probe failed: cat: /tmp/healthy: No such file or directory

  2m    27s 3   {kubelet k8s-node1} spec.containers{nginx}  Normal  Pulled  Container image "10.0.0.11:5000/nginx:1.13" already present on machine
  27s   27s 1   {kubelet k8s-node1} spec.containers{nginx}  Normal  Killing Killing container with docker id af6fa4039e94: pod "exec_default(db31ab6c-c9fb-11e9-90d0-000c29e15b21)" container "nginx" is unhealthy, it will be killed and re-created.
  27s   27s 1   {kubelet k8s-node1} spec.containers{nginx}  Normal  Created Created container with docker id cc92f566f8ea; Security:[seccomp=unconfined]
  27s   27s 1   {kubelet k8s-node1} spec.containers{nginx}  Normal  Started Started container with docker id cc92f566f8ea

#2.liveness探针的httpGet使用
##2.1创建
vi   nginx_pod_httpGet.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: httpget
spec:
  containers:
    - name: nginx
      image: 10.0.0.11:5000/nginx:1.13
      ports:
        - containerPort: 80
      livenessProbe:
        # httpGet probe: kubelet issues GET http://<pod-ip>:80/index.html;
        # any status outside 200-399 (e.g. 404 after the file is renamed)
        # counts as a failure and triggers a container restart.
        httpGet:
          path: /index.html
          port: 80
        initialDelaySeconds: 3  # wait 3s after container start before first probe
        periodSeconds: 3        # probe every 3s
[root@k8s-master health]# kubectl create -f nginx_pod_httpGet.yaml
##2.2进入容器中,把index.html重命名
[root@k8s-master health]# kubectl exec -it httpget bash
root@httpget:/# cd /usr/share/nginx/
root@httpget:/usr/share/nginx# ll
bash: ll: command not found
root@httpget:/usr/share/nginx# ls 
html
root@httpget:/usr/share/nginx# cd html/
root@httpget:/usr/share/nginx/html# ls
50x.html  index.html
root@httpget:/usr/share/nginx/html# mv index.html index.htmlbak
root@httpget:/usr/share/nginx/html# 
##2.3再次进入容器,发现index.html已自动恢复——liveness探针检测到404后重启了容器(新容器用原镜像启动,文件自然还在),整个过程只需几秒钟
[root@k8s-master health]# kubectl exec -it httpget bash
root@httpget:/# cd /usr/share/nginx/html/
root@httpget:/usr/share/nginx/html# ls            
50x.html  index.html
root@httpget:/usr/share/nginx/html# 

##2.4查看信息
[root@k8s-master ~]# kubectl get pod httpget
NAME      READY     STATUS    RESTARTS   AGE
httpget   1/1       Running   1          1m
[root@k8s-master ~]# kubectl describe pod httpget
Name:       httpget
Namespace:  default
Node:       k8s-node1/10.0.0.12
Start Time: Thu, 29 Aug 2019 09:32:02 +0800
Labels:     <none>
Status:     Running
IP:     172.16.79.3
Controllers:    <none>
Containers:
  nginx:
    Container ID:       docker://572c67c1f13c1b2c5716c670ef86f3538d78beaedd16db2ce14d48ac4d170332
    Image:          10.0.0.11:5000/nginx:1.13
    Image ID:           docker-pullable://10.0.0.11:5000/nginx@sha256:e4f0474a75c510f40b37b6b7dc2516241ffa8bde5a442bde3d372c9519c84d90
    Port:           80/TCP
    State:          Running
      Started:          Thu, 29 Aug 2019 09:33:26 +0800
    Last State:         Terminated
      Reason:           Completed
      Exit Code:        0
      Started:          Thu, 29 Aug 2019 09:32:02 +0800
      Finished:         Thu, 29 Aug 2019 09:33:26 +0800
    Ready:          True
    Restart Count:      1
    Liveness:           http-get http://:80/index.html delay=3s timeout=1s period=3s #success=1 #failure=3
    Volume Mounts:      <none>
    Environment Variables:  <none>
Conditions:
  Type      Status
  Initialized   True 
  Ready     True 
  PodScheduled  True 
No volumes.
QoS Class:  BestEffort
Tolerations:    <none>
Events:
  FirstSeen LastSeen    Count   From            SubObjectPath       Type        Reason      Message
  --------- --------    -----   ----            -------------       --------    ------      -------
  1m        1m      1   {default-scheduler }                Normal      Scheduled   Successfully assigned httpget to k8s-node1
  1m        1m      1   {kubelet k8s-node1} spec.containers{nginx}  Normal      Created     Created container with docker id e49065256b3f; Security:[seccomp=unconfined]
  1m        1m      1   {kubelet k8s-node1} spec.containers{nginx}  Normal      Started     Started container with docker id e49065256b3f
  1m        20s     2   {kubelet k8s-node1} spec.containers{nginx}  Normal      Pulled      Container image "10.0.0.11:5000/nginx:1.13" already present on machine
  26s       20s     3   {kubelet k8s-node1} spec.containers{nginx}  Warning     Unhealthy   Liveness probe failed: HTTP probe failed with statuscode: 404
  20s       20s     1   {kubelet k8s-node1} spec.containers{nginx}  Normal      Killing     Killing container with docker id e49065256b3f: pod "httpget_default(ccfe91d9-c9fc-11e9-90d0-000c29e15b21)" container "nginx" is unhealthy, it will be killed and re-created.
  20s       20s     1   {kubelet k8s-node1} spec.containers{nginx}  Normal      Created     Created container with docker id 572c67c1f13c; Security:[seccomp=unconfined]
  20s       20s     1   {kubelet k8s-node1} spec.containers{nginx}  Normal      Started     Started container with docker id 572c67c1f13c

#3.liveness探针的tcpSocket使用
## 3.1创建pod
vi   nginx_pod_tcpSocket.yaml
apiVersion: v1
kind: Pod
metadata:
  # Pod names must be valid DNS-1123 labels (lowercase alphanumeric
  # and '-'). The original name "tcpSocket" contains an uppercase
  # letter and is rejected by the API server with an "Invalid value"
  # error, so the manifest would never create a pod.
  name: tcpsocket
spec:
  containers:
    - name: nginx
      image: 10.0.0.11:5000/nginx:1.13
      ports:
        - containerPort: 80
      livenessProbe:
        # tcpSocket probe: kubelet tries to open a TCP connection to
        # the port; a failed connection marks the container unhealthy
        # and it is killed and re-created.
        tcpSocket:
          port: 80
        initialDelaySeconds: 3  # wait 3s after container start before first probe
        periodSeconds: 3        # probe every 3s

#4.readiness探针的httpGet使用
##4.1创建pod
vi   nginx-rc-httpGet.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: readiness
spec:
  replicas: 2
  selector:
    app: readiness
  template:
    metadata:
      labels:
        app: readiness
    spec:
      containers:
      - name: readiness
        image: 10.0.0.11:5000/nginx:1.13
        ports:
        - containerPort: 80
        # readiness (not liveness) probe: a failing probe does NOT
        # restart the container; it only keeps the pod out of the
        # service's endpoints. /qiangge.html does not exist in the
        # stock image, so pods stay NotReady until the file is
        # created by hand inside the container.
        readinessProbe:
          httpGet:
            path: /qiangge.html
            port: 80
          initialDelaySeconds: 3  # wait 3s after container start before first probe
          periodSeconds: 3        # probe every 3s
[root@k8s-master health]# kubectl create -f nginx-rc-httpGet.yaml 
replicationcontroller "readiness" created

##4.2创建service
[root@k8s-master health]# kubectl expose rc readiness --port=80
service "readiness" exposed

##4.3此时endpoint中是没有信息的
readinessProbe:可用性检查,周期性检查服务是否可用,不可用将从service的endpoints中移除
因为readiness探针一直检测失败(镜像中不存在qiangge.html),pod从未被加入到svc的endpoints中
[root@k8s-master health]# kubectl describe svc readiness 
Name:           readiness
Namespace:      default
Labels:         app=readiness
Selector:       app=readiness
Type:           ClusterIP
IP:         10.254.164.201
Port:           <unset> 80/TCP
Endpoints:      
Session Affinity:   None
No events.
[root@k8s-master health]#

##4.4查看readiness的pod已经创建,但是没有与svc关联上,是因为还没有探针要检测的qiangge.html
readinessProbe:可用性检查,周期性检查服务是否可用,不可用将从service的endpoints中移除
[root@k8s-master health]# kubectl get all -o wide
NAME           DESIRED   CURRENT   READY     AGE       CONTAINER(S)   IMAGE(S)                       SELECTOR
rc/mysql       1         1         1         9h        mysql          10.0.0.11:5000/mysql:5.7       app=mysql
rc/myweb       1         1         1         9h        myweb          10.0.0.11:5000/tomcat-app:v2   app=myweb
rc/readiness   2         2         0         2m        readiness      10.0.0.11:5000/nginx:1.13      app=readiness

NAME             CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE       SELECTOR
svc/kubernetes   10.254.0.1       <none>        443/TCP          19h       <none>
svc/mysql        10.254.233.103   <none>        3306/TCP         9h        app=mysql
svc/myweb        10.254.96.244    <nodes>       8080:30008/TCP   9h        app=myweb
svc/readiness    10.254.164.201   <none>        80/TCP           1m        app=readiness

NAME                 READY     STATUS    RESTARTS   AGE       IP            NODE
po/busybox2          1/1       Running   0          9h        172.16.48.3   k8s-node2
po/exec              1/1       Running   8          17m       172.16.79.2   k8s-node1
po/httpget           1/1       Running   1          10m       172.16.79.3   k8s-node1
po/mysql-3vhk0       1/1       Running   0          33m       172.16.48.2   k8s-node2
po/myweb-bhl1v       1/1       Running   0          9h        172.16.93.2   k8s-master
po/readiness-q2cq7   0/1       Running   0          2m        172.16.93.5   k8s-master
po/readiness-w9c2s   0/1       Running   0          2m        172.16.48.4   k8s-node2
[root@k8s-master health]# 

##4.5进入到一个pod中,创建需要的文件
[root@k8s-master health]# kubectl exec -it readiness-q2cq7 bash
root@readiness-q2cq7:/# cd /usr/share/nginx/html/
root@readiness-q2cq7:/usr/share/nginx/html# ll
bash: ll: command not found
root@readiness-q2cq7:/usr/share/nginx/html# ls 
50x.html  index.html
root@readiness-q2cq7:/usr/share/nginx/html# echo qiangge>qiangge.html

##4.6发现endpoint中发现了一个节点
[root@k8s-master ~]# kubectl describe svc readiness
Name:           readiness
Namespace:      default
Labels:         app=readiness
Selector:       app=readiness
Type:           ClusterIP
IP:         10.254.164.201
Port:           <unset> 80/TCP
Endpoints:      172.16.93.5:80
Session Affinity:   None
No events.
[root@k8s-master ~]# 

4.3dashboard

dashboard
#1.上传准备好的镜像kubernetes-dashboard-amd64:v1.4.1到私有仓库
[root@k8s-master dashboard]# docker image load -i kubernetes-dashboard-amd64_v1.4.1.tar.gz 
5f70bf18a086: Loading layer [==================================================>] 1.024 kB/1.024 kB
2e350fa8cbdf: Loading layer [==================================================>] 86.96 MB/86.96 MB
Loaded image: index.tenxcloud.com/google_containers/kubernetes-dashboard-amd64:v1.4.1
[root@k8s-master dashboard]# docker images
REPOSITORY                                                         TAG                 IMAGE ID            CREATED             SIZE
index.tenxcloud.com/google_containers/kubernetes-dashboard-amd64   v1.4.1              1dda73f463b2        2 years ago         86.8 MB

[root@k8s-master dashboard]# docker image tag index.tenxcloud.com/google_containers/kubernetes-dashboard-amd64:v1.4.1 10.0.0.11:5000/kubernetes-dashboard-amd64:v1.4.1
[root@k8s-master dashboard]# docker push  10.0.0.11:5000/kubernetes-dashboard-amd64:v1.4.1
The push refers to a repository [10.0.0.11:5000/kubernetes-dashboard-amd64]
5f70bf18a086: Mounted from tomcat-app 
2e350fa8cbdf: Pushed 
v1.4.1: digest: sha256:e446d645ff6e6b3147205c58258c2fb431105dc46998e4d742957623bf028014 size: 1147
[root@k8s-master dashboard]# 

#2.pod创建
[root@k8s-master dashboard]# cat dashboard-deploy.yaml 
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
# Keep the name in sync with image version and
# gce/coreos/kube-manifests/addons/dashboard counterparts
  name: kubernetes-dashboard-latest
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
        version: latest
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: kubernetes-dashboard
        image: 10.0.0.11:5000/kubernetes-dashboard-amd64:v1.4.1
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 50Mi
          requests:
            cpu: 100m
            memory: 50Mi
        ports:
        - containerPort: 9090
        args:
         -  --apiserver-host=http://10.0.0.11:8080
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
[root@k8s-master dashboard]# 
[root@k8s-master dashboard]# cat dashboard-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090

[root@k8s-master dashboard]# kubectl create -f dashboard-deploy.yaml 
deployment "kubernetes-dashboard-latest" created
[root@k8s-master dashboard]# kubectl create -f dashboard-svc.yaml 
service "kubernetes-dashboard" created

#3.查看
[root@k8s-master dashboard]# kubectl get all --namespace=kube-system
NAME                                 DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/kube-dns                      1         1         1            1           10h
deploy/kubernetes-dashboard-latest   1         1         1            1           22s

NAME                       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
svc/kube-dns               10.254.230.254   <none>        53/UDP,53/TCP   10h
svc/kubernetes-dashboard   10.254.212.163   <none>        80/TCP          15s

NAME                                        DESIRED   CURRENT   READY     AGE
rs/kube-dns-453684471                       1         1         1         10h
rs/kubernetes-dashboard-latest-1012954252   1         1         1         22s

NAME                                              READY     STATUS    RESTARTS   AGE
po/kube-dns-453684471-99j2v                       4/4       Running   0          10h
po/kubernetes-dashboard-latest-1012954252-n16fw   1/1       Running   0          22s
[root@k8s-master dashboard]# 

28-1.kubernetes

访问 http://10.0.0.11:8080/ui 会自动跳转到dashboard界面

28-1.kubernetes
28-1.kubernetes

4.4namespace

Namespace(命名空间)是Kubernetes系统中的另一个非常重要的概念,Namespace在很多情况下用于实现多租户的资源隔离。
阿里云

cms项目用到数据库
商城项目也需要数据库
同一个namespace下不允许出现两个同名的service(例如都叫mysql),但不同namespace中可以同名,从而实现资源隔离

生产环境中把不同的应用放到不同的namespace中。
namespace管理命令

创建namespace
[root@k8s-master k8s]# kubectl create namespace qiangge
namespace "qiangge" created

查看namespace
[root@k8s-master k8s]# kubectl get namespace 
NAME          STATUS    AGE
default       Active    8d
kube-system   Active    8d
qiangge       Active    3s

删除namespace
[root@k8s-master k8s]# kubectl delete namespace qiangge
namespace "qiangge" deleted
注:特别危险!会删除namespace下所有的k8s资源
#1.查看所有namespace下的资源
[root@k8s-master dashboard]# kubectl get namespace
NAME          STATUS    AGE
default       Active    20h
kube-system   Active    20h
[root@k8s-master dashboard]# kubectl get all --all-namespaces 
NAMESPACE     NAME                                 DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
kube-system   deploy/kube-dns                      1         1         1            1           10h
kube-system   deploy/kubernetes-dashboard-latest   1         1         1            1           15m

NAMESPACE   NAME           DESIRED   CURRENT   READY     AGE
default     rc/mysql       1         1         1         10h
default     rc/myweb       1         1         1         10h
default     rc/readiness   2         2         1         1h

NAMESPACE     NAME                       CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
default       svc/kubernetes             10.254.0.1       <none>        443/TCP          20h
default       svc/mysql                  10.254.233.103   <none>        3306/TCP         10h
default       svc/myweb                  10.254.96.244    <nodes>       8080:30008/TCP   10h
default       svc/readiness              10.254.164.201   <none>        80/TCP           1h
kube-system   svc/kube-dns               10.254.230.254   <none>        53/UDP,53/TCP    10h
kube-system   svc/kubernetes-dashboard   10.254.212.163   <none>        80/TCP           15m

NAMESPACE     NAME                                        DESIRED   CURRENT   READY     AGE
kube-system   rs/kube-dns-453684471                       1         1         1         10h
kube-system   rs/kubernetes-dashboard-latest-1012954252   1         1         1         15m

NAMESPACE     NAME                                              READY     STATUS             RESTARTS   AGE
default       po/busybox2                                       1/1       Running            1          10h
default       po/exec                                           0/1       CrashLoopBackOff   27         1h
default       po/httpget                                        1/1       Running            1          1h
default       po/mysql-3vhk0                                    1/1       Running            0          1h
default       po/myweb-bhl1v                                    1/1       Running            0          10h
default       po/readiness-q2cq7                                1/1       Running            0          1h
default       po/readiness-w9c2s                                0/1       Running            0          1h
kube-system   po/kube-dns-453684471-99j2v                       4/4       Running            0          10h
kube-system   po/kubernetes-dashboard-latest-1012954252-n16fw   1/1       Running            0          15m
[root@k8s-master dashboard]# 

#2.使用namespace
##2.1创建namespace
[root@k8s-master namespace]# kubectl create namespace qiangge
namespace "qiangge" created

[root@k8s-master namespace]# cat nginx-rc.yaml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: myweb
  namespace: qiangge
spec:
  replicas: 2
  selector:
    app: myweb
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
      - name: myweb
        image: 10.0.0.11:5000/nginx:1.13
        ports:
        - containerPort: 80
[root@k8s-master namespace]# cat nginx-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: myweb
  namespace: qiangge
spec:
  type: ClusterIP
  ports:
    - port: 80
      targetPort: 80
  selector:
    app: myweb

[root@k8s-master namespace]# 

##2.2批量创建pod
[root@k8s-master namespace]# kubectl create -f .
replicationcontroller "myweb" created
service "myweb" created

##2.3查看
[root@k8s-master namespace]# kubectl get all --namespace=qiangge
NAME       DESIRED   CURRENT   READY     AGE
rc/myweb   2         2         2         1m

NAME        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
svc/myweb   10.254.171.53   <none>        80/TCP    1m

NAME             READY     STATUS    RESTARTS   AGE
po/myweb-1jmfd   1/1       Running   0          1m
po/myweb-mx0sf   1/1       Running   0          1m

4.5proxy代理访问

访问k8s中应用的方式
在svc中配置的,前面访问nginx我们都是使用的nodePort方式
第一种:NodePort类型 
type: NodePort
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30008

在4.4例子中,我们创建的NGINX使用的是ClusterIP方式
第二种:ClusterIP类型
 type: ClusterIP
  ports:
    - port: 80
      targetPort: 80

http://10.0.0.11:8080/api/v1/proxy/namespaces/qiangge/services/myweb/

28-1.kubernetes

4.6heapster监控

#1.heapster监控,准备yml
[root@k8s-master heapster-influxdb]# ll
total 20
-rw-r--r-- 1 root root  414 Sep 14  2016 grafana-service.yaml
-rw-r--r-- 1 root root  682 Jul  1 11:20 heapster-controller.yaml
-rw-r--r-- 1 root root  249 Sep 14  2016 heapster-service.yaml
-rw-r--r-- 1 root root 1605 Jul  1 11:19 influxdb-grafana-controller.yaml
-rw-r--r-- 1 root root  259 Sep 14  2016 influxdb-service.yaml

[root@k8s-master heapster-influxdb]# cat grafana-service.yaml 
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  name: monitoring-grafana
  namespace: kube-system
spec:
  # In a production setup, we recommend accessing Grafana through an external Loadbalancer
  # or through a public IP. 
  # type: LoadBalancer
  ports:
  - port: 80
    targetPort: 3000
  selector:
    name: influxGrafana

[root@k8s-master heapster-influxdb]# cat heapster-controller.yaml 
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    k8s-app: heapster
    name: heapster
    version: v6
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  selector:
    k8s-app: heapster
    version: v6
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v6
    spec:
      nodeSelector:
         kubernetes.io/hostname: k8s-master
      containers:
      - name: heapster
        image: kubernetes/heapster:canary
        imagePullPolicy: IfNotPresent
        command:
        - /heapster
        - --source=kubernetes:http://10.0.0.11:8080?inClusterConfig=false
        - --sink=influxdb:http://monitoring-influxdb:8086
[root@k8s-master heapster-influxdb]#
[root@k8s-master heapster-influxdb]# cat heapster-service.yaml 
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
[root@k8s-master heapster-influxdb]# 

[root@k8s-master heapster-influxdb]# cat influxdb-grafana-controller.yaml 
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    name: influxGrafana
  name: influxdb-grafana
  namespace: kube-system
spec:
  replicas: 1
  selector:
    name: influxGrafana
  template:
    metadata:
      labels:
        name: influxGrafana
    spec:
      containers:
      - name: influxdb
        image: kubernetes/heapster_influxdb:v0.5
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /data
          name: influxdb-storage
      - name: grafana
        imagePullPolicy: IfNotPresent
        image: kubernetes/heapster_grafana:v2.6.0
        env:
          - name: INFLUXDB_SERVICE_URL
            value: http://monitoring-influxdb:8086
            # The following env variables are required to make Grafana accessible via
            # the kubernetes api-server proxy. On production clusters, we recommend
            # removing these env variables, setup auth for grafana, and expose the grafana
            # service using a LoadBalancer or a public IP.
          - name: GF_AUTH_BASIC_ENABLED
            value: "false"
          - name: GF_AUTH_ANONYMOUS_ENABLED
            value: "true"
          - name: GF_AUTH_ANONYMOUS_ORG_ROLE
            value: Admin
          - name: GF_SERVER_ROOT_URL
            value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
        volumeMounts:
        - mountPath: /var
          name: grafana-storage
      nodeSelector:
         kubernetes.io/hostname: k8s-master
      volumes:
      - name: influxdb-storage
        emptyDir: {}
      - name: grafana-storage
        emptyDir: {}
[root@k8s-master heapster-influxdb]# 

[root@k8s-master heapster-influxdb]# cat influxdb-service.yaml 
apiVersion: v1
kind: Service
metadata:
  labels: null
  name: monitoring-influxdb
  namespace: kube-system
spec:
  ports:
  - name: http
    port: 8083
    targetPort: 8083
  - name: api
    port: 8086
    targetPort: 8086
  selector:
    name: influxGrafana
[root@k8s-master heapster-influxdb]# 

#2.准备镜像
[root@k8s-master heapster-influxdb]# docker image load -i docker_heapster_grafana.tar.gz 
c69ae1aa4698: Loading layer [==================================================>]   131 MB/131 MB
5f70bf18a086: Loading layer [==================================================>] 1.024 kB/1.024 kB
75a5b97e491c: Loading layer [==================================================>] 127.1 MB/127.1 MB
e188e2340071: Loading layer [==================================================>] 16.92 MB/16.92 MB
5e43af080be6: Loading layer [==================================================>] 55.81 kB/55.81 kB
7ecd917a174c: Loading layer [==================================================>] 4.096 kB/4.096 kB
Loaded image: docker.io/kubernetes/heapster_grafana:v2.6.0
[root@k8s-master heapster-influxdb]# docker image load -i docker_heapster_influxdb.tar.gz 
8ceab61e5aa8: Loading layer [==================================================>] 197.2 MB/197.2 MB
3c84ae1bbde2: Loading layer [==================================================>] 208.9 kB/208.9 kB
e8061ac24ae3: Loading layer [==================================================>] 4.608 kB/4.608 kB
5f70bf18a086: Loading layer [==================================================>] 1.024 kB/1.024 kB
58484cf9c5e7: Loading layer [==================================================>] 63.49 MB/63.49 MB
07d2297acddc: Loading layer [==================================================>] 4.608 kB/4.608 kB
Loaded image: docker.io/kubernetes/heapster_influxdb:v0.5
[root@k8s-master heapster-influxdb]# docker image load -i docker_heapster.tar.gz 
c12ecfd4861d: Loading layer [==================================================>] 130.9 MB/130.9 MB
5f70bf18a086: Loading layer [==================================================>] 1.024 kB/1.024 kB
998608e2fcd4: Loading layer [==================================================>] 45.16 MB/45.16 MB
591569fa6c34: Loading layer [==================================================>] 126.5 MB/126.5 MB
0b2fe2c6ef6b: Loading layer [==================================================>] 136.2 MB/136.2 MB
f9f3fb66a490: Loading layer [==================================================>] 322.9 MB/322.9 MB
6e2e798f8998: Loading layer [==================================================>]  2.56 kB/2.56 kB
21ac53bc7cd6: Loading layer [==================================================>] 5.632 kB/5.632 kB
7f96c89af577: Loading layer [==================================================>] 79.98 MB/79.98 MB
4371d588893a: Loading layer [==================================================>] 150.2 MB/150.2 MB
Loaded image: docker.io/kubernetes/heapster:canary

#3.创建pod
[root@k8s-master heapster-influxdb]# kubectl create -f .
service "monitoring-grafana" created
replicationcontroller "heapster" created
service "heapster" created
replicationcontroller "influxdb-grafana" created
service "monitoring-influxdb" created
[root@k8s-master heapster-influxdb]# 

#4.查看状态
[root@k8s-master heapster-influxdb]# kubectl get all --namespace=kube-system
NAME                                 DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/kube-dns                      1         1         1            1           11h
deploy/kubernetes-dashboard-latest   1         1         1            1           1h

NAME                  DESIRED   CURRENT   READY     AGE
rc/heapster           1         1         1         25m
rc/influxdb-grafana   1         1         1         25m

NAME                       CLUSTER-IP       EXTERNAL-IP   PORT(S)             AGE
svc/heapster               10.254.84.98     <none>        80/TCP              25m
svc/kube-dns               10.254.230.254   <none>        53/UDP,53/TCP       11h
svc/kubernetes-dashboard   10.254.212.163   <none>        80/TCP              1h
svc/monitoring-grafana     10.254.145.117   <none>        80/TCP              25m
svc/monitoring-influxdb    10.254.57.213    <none>        8083/TCP,8086/TCP   25m

NAME                                        DESIRED   CURRENT   READY     AGE
rs/kube-dns-453684471                       1         1         1         11h
rs/kubernetes-dashboard-latest-1012954252   1         1         1         1h

NAME                                              READY     STATUS    RESTARTS   AGE
po/heapster-kc5qm                                 1/1       Running   0          25m
po/influxdb-grafana-s1b9p                         2/2       Running   0          25m
po/kube-dns-453684471-99j2v                       4/4       Running   4          11h
po/kubernetes-dashboard-latest-1012954252-n16fw   1/1       Running   0          1h
[root@k8s-master heapster-influxdb]# 

28-1.kubernetes

监控数据的获得,是从cadvisor获得的,现在cadvisor集成到了kubelet中,外部要想访问cadvisor,可以修改kubelet配置文件,添加--cadvisor-port参数
这里在master节点操作不行,访问8080端口是apiserver,所以在node2上操作了
[root@k8s-node2 ~]# cat /etc/kubernetes/kubelet 
# Add your own!
KUBELET_ARGS="--cluster_dns=10.254.230.254 --cluster_domain=cluster.local --cadvisor-port=8080"
重启kubelet
[root@k8s-node2 ~]# vim /etc/kubernetes/kubelet 
[root@k8s-node2 ~]# systemctl restart kubelet.service 
[root@k8s-node2 ~]# 

28-1.kubernetes
28-1.kubernetes
28-1.kubernetes

4.7k8s弹性伸缩hpa

#1.删除default namespace下的内容
[root@k8s-master k8s]# kubectl delete rc readiness
replicationcontroller "readiness" deleted
[root@k8s-master k8s]# kubectl delete rc mysql
replicationcontroller "mysql" deleted
[root@k8s-master k8s]# kubectl delete rc myweb
replicationcontroller "myweb" deleted
[root@k8s-master k8s]# kubectl delete pod httpget
pod "httpget" deleted
[root@k8s-master k8s]# kubectl delete pod exec
pod "exec" deleted
[root@k8s-master k8s]# kubectl delete pod busybox2
pod "busybox2" deleted
[root@k8s-master k8s]# kubectl delete svc mysql
service "mysql" deleted
[root@k8s-master k8s]# kubectl delete svc myweb
service "myweb" deleted
[root@k8s-master k8s]# kubectl delete svc readiness
service "readiness" deleted
[root@k8s-master k8s]# kubectl get all
NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   10.254.0.1   <none>        443/TCP   21h

NAME          READY     STATUS        RESTARTS   AGE
po/busybox2   1/1       Terminating   3          12h
[root@k8s-master k8s]# kubectl get pods
No resources found.
[root@k8s-master k8s]# kubectl get all
NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   10.254.0.1   <none>        443/TCP   21h
[root@k8s-master k8s]# 

#2.准备yml,加入资源限制的内容resources:下面的内容,方便测试
[root@k8s-master hpa]# cat nginx-rc.yaml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: myweb
spec:
  replicas: 2
  selector:
    app: myweb
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
      - name: myweb
        image: 10.0.0.11:5000/nginx:1.13
        ports:
        - containerPort: 80
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
          requests:
            cpu: 100m
            memory: 50Mi

[root@k8s-master hpa]# 

#3.创建pod
[root@k8s-master hpa]# kubectl create -f nginx-rc.yaml 
replicationcontroller "myweb" created
[root@k8s-master hpa]# kubectl get all
NAME       DESIRED   CURRENT   READY     AGE
rc/myweb   2         2         2         18s

NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   10.254.0.1   <none>        443/TCP   22h

NAME             READY     STATUS    RESTARTS   AGE
po/myweb-9khsv   1/1       Running   0          18s
po/myweb-q989x   1/1       Running   0          18s
[root@k8s-master hpa]# 

#4.创建hpa
如果创建pod用的是replicationcontroller,autoscale的对象就是replicationcontroller
如果创建pod用的是deployment,autoscale的对象就是deployment
--max=8 --min=1 设置最大最少pod数
--cpu-percent=10  cpu超过10%,就增加pod
kubectl autoscale replicationcontroller myweb --max=8 --min=1 --cpu-percent=10

[root@k8s-master hpa]# kubectl autoscale replicationcontroller myweb --max=8 --min=1 --cpu-percent=10
replicationcontroller "myweb" autoscaled
[root@k8s-master hpa]# kubectl get all
NAME        REFERENCE                     TARGET    CURRENT   MINPODS   MAXPODS   AGE
hpa/myweb   ReplicationController/myweb   10%       0%        1         8         9s

NAME       DESIRED   CURRENT   READY     AGE
rc/myweb   1         1         1         3m

NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   10.254.0.1   <none>        443/TCP   22h

NAME             READY     STATUS    RESTARTS   AGE
po/myweb-9khsv   1/1       Running   0          3m
[root@k8s-master hpa]# 

#5.压力测试:
[root@k8s-master hpa]# kubectl get pod -o wide
NAME          READY     STATUS    RESTARTS   AGE       IP            NODE
myweb-9khsv   1/1       Running   0          5m        172.16.79.2   k8s-node1
一共发起500000次请求,并发1000个请求,请求数可以根据自己的资源情况调整
ab -n 500000 -c 1000 http://172.16.79.2/

#6.查看
[root@k8s-master hpa]# kubectl describe hpa
Name:               myweb
Namespace:          default
Labels:             <none>
Annotations:            <none>
CreationTimestamp:      Thu, 29 Aug 2019 12:21:20 +0800
Reference:          ReplicationController/myweb
Target CPU utilization:     10%
Current CPU utilization:    44%
Min replicas:           1
Max replicas:           8
ReplicationController pods: 4 current / 4 desired
Events:
  FirstSeen LastSeen    Count   From                SubObjectPath   Type        Reason          Message
  --------- --------    -----   ----                -------------   --------    ------          -------
  10m       10m     2   {horizontal-pod-autoscaler }            Normal      DesiredReplicasComputed Computed the desired num of replicas: 0 (avgCPUutil: 0, current replicas: 2)
  10m       10m     1   {horizontal-pod-autoscaler }            Normal      SuccessfulRescale   New size: 1; reason: All metrics below target
  10m       5m      11  {horizontal-pod-autoscaler }            Normal      DesiredReplicasComputed Computed the desired num of replicas: 0 (avgCPUutil: 0, current replicas: 1)
  5m        5m      2   {horizontal-pod-autoscaler }            Normal      DesiredReplicasComputed Computed the desired num of replicas: 2 (avgCPUutil: 17, current replicas: 1)
  5m        5m      1   {horizontal-pod-autoscaler }            Normal      SuccessfulRescale   New size: 2; reason: CPU utilization above target
  4m        3m      3   {horizontal-pod-autoscaler }            Normal      DesiredReplicasComputed Computed the desired num of replicas: 9 (avgCPUutil: 86, current replicas: 2)
  3m        2m      3   {horizontal-pod-autoscaler }            Normal      DesiredReplicasComputed Computed the desired num of replicas: 9 (avgCPUutil: 42, current replicas: 2)
  2m        1m      3   {horizontal-pod-autoscaler }            Normal      DesiredReplicasComputed Computed the desired num of replicas: 9 (avgCPUutil: 43, current replicas: 2)
  1m        1m      3   {horizontal-pod-autoscaler }            Normal      DesiredReplicasComputed Computed the desired num of replicas: 8 (avgCPUutil: 39, current replicas: 2)
  1m        1m      1   {horizontal-pod-autoscaler }            Normal      SuccessfulRescale   New size: 4; reason: CPU utilization above target
  51s       51s     2   {horizontal-pod-autoscaler }            Normal      DesiredReplicasComputed Computed the desired num of replicas: 8 (avgCPUutil: 39, current replicas: 4)
  21s       21s     2   {horizontal-pod-autoscaler }            Normal      DesiredReplicasComputed Computed the desired num of replicas: 9 (avgCPUutil: 44, current replicas: 4)
[root@k8s-master hpa]# 

28-1.kubernetes
28-1.kubernetes

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值