K8S hello world

 

My test environment is Red Hat 7.1 + K8S all-in-one + binary install.

 

Component                                                          IP
etcd; kube-apiserver; kube-controller-manager; kube-scheduler     9.21.62.200
kube-proxy; kubelet                                               9.21.62.200

 

 

1. Install Docker

https://get.docker.com/rpm/1.7.1/centos-7/RPMS/x86_64/docker-engine-1.7.1-1.el7.centos.x86_64.rpm
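
A minimal install-and-start sketch (assuming a stock RHEL 7 systemd setup; yum localinstall resolves the RPM's dependencies from the configured repos):

cd /root/k8s
curl -O https://get.docker.com/rpm/1.7.1/centos-7/RPMS/x86_64/docker-engine-1.7.1-1.el7.centos.x86_64.rpm
yum localinstall -y docker-engine-1.7.1-1.el7.centos.x86_64.rpm
systemctl enable docker && systemctl start docker
docker version   # verify that both client and daemon answer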

2. Install etcd

https://github.com/coreos/etcd/releases/download/v2.0.11/etcd-v2.0.11-linux-amd64.tar.gz
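
Unpack and start a single-node etcd; a minimal sketch (etcd 2.x also binds the legacy client port 4001 by default, which is what the --etcd_servers flag below points at; the directory listing further down happens to show an older v0.4.6 tarball, so adjust the file name to whatever you actually downloaded):

cd /root/k8s
tar xzf etcd-v2.0.11-linux-amd64.tar.gz
cd etcd-v2.0.11-linux-amd64
./etcd > /var/log/etcd.log 2>&1 &
curl -L http://127.0.0.1:4001/version   # should print the etcd version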

3. Install K8S

   1) Download k8s from https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v0.16.1/kubernetes.tar.gz
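
A sketch of the unpacking steps (the nested server tarball name is an assumption, though it is the usual one for releases of this era); this produces the .../kubernetes/server/kubernetes/server/bin path used in the rest of this post:

cd /root/k8s
tar xzf kubernetes.tar.gz
cd kubernetes/server
tar xzf kubernetes-server-linux-amd64.tar.gz   # assumption: server binaries ship as a nested tarball
ls kubernetes/server/bin                       # kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, kubectl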

   2) Installation directory layout

 [root@xlhu2 bin]# ll /root/k8s/
total 101360
-rw-r--r-- 1 root root   4735688 Jul 14  2015 docker-engine-1.7.1-1.el7.centos.x86_64.rpm
drwxr-xr-x 3  501 games      117 Jan 21 02:53 etcd-v0.4.6-linux-amd64
-rw-r--r-- 1 root root   6022487 Oct 29  2014 etcd-v0.4.6-linux-amd64.tar.gz
drwxr-xr-x 7 root wheel      130 Jan 21 03:05 kubernetes
-rw-r--r-- 1 root root  93016034 May  1  2015 kubernetes.tar.gz
-rw-r--r-- 1 root root       714 Jan 22 03:00 replicationcontroller.json
-rw-r--r-- 1 root root       409 Jan 22 02:42 test-pod.json
-rw-r--r-- 1 root root       175 Jan 22 03:14 test-svc.json
[root@xlhu2 bin]#

      3) Run kube-apiserver

   

./kube-apiserver --address=0.0.0.0  --insecure-port=8080 --portal_net="127.0.0.0/16" --log_dir=/var/log/kube  --kubelet_port=10250 --v=0  --logtostderr=false --etcd_servers=http://127.0.0.1:4001 --allow_privileged=false
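
A quick sanity check that the apiserver is answering on the insecure port (a sketch; both endpoints exist on releases of this era):

curl http://127.0.0.1:8080/api       # lists the supported API versions, e.g. v1beta1
curl http://127.0.0.1:8080/healthz   # should return ok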
 

 

    4) Run kube-controller-manager

 

./kube-controller-manager  --v=0 --logtostderr=false --log_dir=/var/log/kube  --master=127.0.0.1:8080 --machines=127.0.0.1
 

 

    5) Run kube-scheduler

   

./kube-scheduler  --master=127.0.0.1:8080  --v=0  --log_dir=/var/log/kube
  

 

   6) Run kube-proxy

  

./kube-proxy  --logtostderr=false  --v=0  --master=http://127.0.0.1:8080 
   

 

   7) Run kubelet

   

./kubelet  --logtostderr=false  --v=0  --allow-privileged=false   --log_dir=/var/log/kube  --address=127.0.0.1  --port=10250  --hostname_override=127.0.0.1   --api_servers=http://127.0.0.1:8080
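
With all five components started, a quick check that everything is up and the node has registered (a sketch; on a release this old the node resource should still be named "minions"):

ps -ef | grep -E 'kube|etcd' | grep -v grep   # all daemons should be listed
./kubectl get minions                         # 127.0.0.1 should appear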
    8) Create a pod

 

      a. Create the JSON file (test-pod.json):
{
  "id": "fedoraapache",
  "kind": "Pod",
  "apiVersion": "v1beta1",
  "desiredState": {
    "manifest": {
      "version": "v1beta1",
      "id": "fedoraapache",
      "containers": [{
        "name": "fedoraapache",
        "image": "fedora/apache",
        "ports": [{
          "containerPort": 80,
          "hostPort": 8080
        }]
      }]
    }
  },
  "labels": {
    "name": "fedoraapache"
  }
}
    b. Create the pod
./kubectl create -f test-pod.json
    c. Check the result
   
[root@xlhu2 bin]# cd /root/k8s/kubernetes/server/kubernetes/server/bin
[root@xlhu2 bin]# ./kubectl get pods
POD            IP            CONTAINER(S)   IMAGE(S)        HOST                  LABELS              STATUS    CREATED   MESSAGE
fedoraapache   172.17.0.39                                  127.0.0.1/127.0.0.1   name=fedoraapache   Running   2 hours   
                             fedoraapache   fedora/apache                                             Running   2 hours   
[root@xlhu2 bin]# docker ps
CONTAINER ID        IMAGE                                  COMMAND             CREATED             STATUS              PORTS                  NAMES
7dec2cb57b83        fedora/apache                          "/run-apache.sh"    2 hours ago         Up 2 hours                                 k8s_fedoraapache.a1850cda_fedoraapache_default_c1f162b6-c29a-11e5-a9f3-525400a5a3b1_b1512d92   
2b452bfc0bab        gcr.io/google_containers/pause:0.8.0   "/pause"            2 hours ago         Up 2 hours          0.0.0.0:8090->80/tcp   k8s_POD.f60e046f_fedoraapache_default_c1f162b6-c29a-11e5-a9f3-525400a5a3b1_9aa22d2f            
[root@xlhu2 bin]# curl localhost:8090
Apache
[root@xlhu2 bin]# 
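
Note that the host-port mapping (0.0.0.0:8090->80/tcp) sits on the pause container, not on the apache container: each pod gets one pause container that owns the pod's network namespace, and the application containers join it. A sketch to confirm, using the container IDs from the listing above:

docker inspect --format '{{ .HostConfig.NetworkMode }}' 7dec2cb57b83   # prints container:2b452bfc0bab..., i.e. it shares the pause container's network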
   
   9) Create a ReplicationController
 The Replication Controller is one of the most useful features in Kubernetes. It maintains a set of replicated Pods: an application often needs several Pods to back it, and the RC guarantees the replica count, so if the host a replica was scheduled onto fails, the RC starts an equal number of Pods on other hosts. An RC can create multiple Pod replicas from a pod template, and it can also adopt already-running Pods; the association is made through a label selector.
    a. Create the JSON file (replicationcontroller.json):
   
{
    "id": "lianjiatest.com",
    "apiVersion": "v1beta1",
    "kind": "ReplicationController",
    "desiredState": {
      "replicas": 5,
      "replicaSelector": {"name": "liutest"},
      "podTemplate": {
        "desiredState": {
           "manifest": {
             "version": "v1beta1",
             "id": "apacheserver",
             "containers": [{
               "name": "apachetest",
               "image": "fedora/apache",
               "imagePullPolicy": "PullIfNotPresent",
               "ports": [{
                   "containerPort": 80
               }]
             }]
           }
         },
         "labels": {"name": "liutest"}
        }},
    "labels": {"name": "replicationtest"}
  }
      b. Create the pod with 5 replicas
     
./kubectl create -f replicationcontroller.json
 
    c. Check the result
    
[root@xlhu2 bin]# ./kubectl create -f /root/k8s/replicationcontroller.json 
replicationControllers/lianjiatest.com
[root@xlhu2 bin]# ./kubectl get pods
POD                     IP            CONTAINER(S)   IMAGE(S)        HOST                  LABELS              STATUS    CREATED     MESSAGE
fedoraapache            172.17.0.39                                  127.0.0.1/127.0.0.1   name=fedoraapache   Running   2 hours     
                                      fedoraapache   fedora/apache                                             Running   2 hours     
lianjiatest.com-0suix                                                127.0.0.1/            name=liutest        Pending   6 seconds   
                                      apachetest     fedora/apache                                                       
lianjiatest.com-2k5pl   172.17.0.40                                  127.0.0.1/127.0.0.1   name=liutest        Running   6 seconds   
                                      apachetest     fedora/apache                                             Running   4 seconds   
lianjiatest.com-otn5w   172.17.0.43                                  127.0.0.1/127.0.0.1   name=liutest        Running   6 seconds   
                                      apachetest     fedora/apache                                             Running   2 seconds   
lianjiatest.com-p4nim   172.17.0.42                                  127.0.0.1/127.0.0.1   name=liutest        Running   6 seconds   
                                      apachetest     fedora/apache                                             Running   3 seconds   
lianjiatest.com-t7hn1   172.17.0.41                                  127.0.0.1/127.0.0.1   name=liutest        Running   6 seconds   
                                      apachetest     fedora/apache                                             Running   3 seconds   
[root@xlhu2 bin]# docker ps
CONTAINER ID        IMAGE                                  COMMAND             CREATED             STATUS              PORTS                  NAMES
2abbc1781b99        fedora/apache                          "/run-apache.sh"    11 seconds ago      Up 8 seconds                               k8s_apachetest.75f50b88_lianjiatest.com-0suix_default_e4ddc3de-c2ac-11e5-b1dd-525400a5a3b1_0771d52c   
4ab5778a9ad6        fedora/apache                          "/run-apache.sh"    11 seconds ago      Up 8 seconds                               k8s_apachetest.75f50b88_lianjiatest.com-otn5w_default_e4dd3625-c2ac-11e5-b1dd-525400a5a3b1_b2d65e5d   
5ad5b3b60d38        fedora/apache                          "/run-apache.sh"    11 seconds ago      Up 9 seconds                               k8s_apachetest.75f50b88_lianjiatest.com-p4nim_default_e4ddf207-c2ac-11e5-b1dd-525400a5a3b1_9ad86417   
ab616eacacf4        fedora/apache                          "/run-apache.sh"    12 seconds ago      Up 9 seconds                               k8s_apachetest.75f50b88_lianjiatest.com-t7hn1_default_e4ddfce7-c2ac-11e5-b1dd-525400a5a3b1_3568fa44   
a9be9c705726        fedora/apache                          "/run-apache.sh"    12 seconds ago      Up 10 seconds                              k8s_apachetest.75f50b88_lianjiatest.com-2k5pl_default_e4dd545e-c2ac-11e5-b1dd-525400a5a3b1_6140f4dc   
99c857266bd6        gcr.io/google_containers/pause:0.8.0   "/pause"            13 seconds ago      Up 10 seconds                              k8s_POD.d41d03ce_lianjiatest.com-0suix_default_e4ddc3de-c2ac-11e5-b1dd-525400a5a3b1_265b8238          
8a529706a844        gcr.io/google_containers/pause:0.8.0   "/pause"            13 seconds ago      Up 10 seconds                              k8s_POD.d41d03ce_lianjiatest.com-otn5w_default_e4dd3625-c2ac-11e5-b1dd-525400a5a3b1_653ca41d          
5dea06978306        gcr.io/google_containers/pause:0.8.0   "/pause"            13 seconds ago      Up 11 seconds                              k8s_POD.d41d03ce_lianjiatest.com-p4nim_default_e4ddf207-c2ac-11e5-b1dd-525400a5a3b1_8e2ec53c          
20dab1b797db        gcr.io/google_containers/pause:0.8.0   "/pause"            13 seconds ago      Up 11 seconds                              k8s_POD.d41d03ce_lianjiatest.com-t7hn1_default_e4ddfce7-c2ac-11e5-b1dd-525400a5a3b1_17e70e3b          
b32e94be7ac4        gcr.io/google_containers/pause:0.8.0   "/pause"            13 seconds ago      Up 11 seconds                              k8s_POD.d41d03ce_lianjiatest.com-2k5pl_default_e4dd545e-c2ac-11e5-b1dd-525400a5a3b1_64468c87          
7dec2cb57b83        fedora/apache                          "/run-apache.sh"    2 hours ago         Up 2 hours                                 k8s_fedoraapache.a1850cda_fedoraapache_default_c1f162b6-c29a-11e5-a9f3-525400a5a3b1_b1512d92          
2b452bfc0bab        gcr.io/google_containers/pause:0.8.0   "/pause"            2 hours ago         Up 2 hours          0.0.0.0:8090->80/tcp   k8s_POD.f60e046f_fedoraapache_default_c1f162b6-c29a-11e5-a9f3-525400a5a3b1_9aa22d2f                   
[root@xlhu2 bin]# 
    OK! Even if you delete some of these pods on the slave, the RC quickly replenishes them back to 5.
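
A quick way to watch the self-healing (a sketch; the pod name is one from the listing above):

./kubectl delete pod lianjiatest.com-0suix
./kubectl get pods   # a replacement pod shows up within seconds, keeping the count at 5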
    10) Create a service
        Services are the outermost unit of Kubernetes. By virtualizing an access IP and a service port, a Service exposes the Pod resources we defined. In this version it is implemented with iptables NAT forwarding, and the forwarding target is a random port opened by kube-proxy (see the iptables sketch after the verification below).
     a. Create the JSON file (test-svc.json):
    
{
  "id": "webserver",
  "kind": "Service",
  "apiVersion": "v1beta1",
  "selector": {
    "name": "liutest"
  },
  "protocol": "TCP",
  "containerPort": 80,
  "port": 8080
}
       b. Create the service and check the result
   
[root@xlhu2 bin]# ./kubectl create -f /root/k8s/test-svc.json
services/webserver
[root@xlhu2 bin]# ./kubectl get svc
NAME            LABELS                                    SELECTOR       IP             PORT(S)
kubernetes      component=apiserver,provider=kubernetes   <none>         127.0.0.2      443/TCP
kubernetes-ro   component=apiserver,provider=kubernetes   <none>         127.0.0.1      80/TCP
webserver       <none>                                    name=liutest   127.0.33.201   8080/TCP
[root@xlhu2 bin]# curl 127.0.33.201:8080
Apache
[root@xlhu2 bin]#
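
To see the NAT plumbing kube-proxy set up for the portal IP, inspect its iptables chains (a sketch; KUBE-PORTALS-CONTAINER and KUBE-PORTALS-HOST are the chain names the userspace proxy of this era installs):

iptables -t nat -L KUBE-PORTALS-CONTAINER -n | grep 127.0.33.201   # redirect rule for traffic from containers
iptables -t nat -L KUBE-PORTALS-HOST -n | grep 127.0.33.201        # redirect rule for traffic from the host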
 
   Finally, a few points worth noting:
In replicationcontroller.json, "replicaSelector": {"name": "XXXXXX"} must match the pod template's "labels": {"name": "XXXXXXX"} as well as the service's "selector": {"name": "XXXXXXX"};
 
Reference:
  http://segmentfault.com/a/119000000288679
 
 
 