Easy-to-Understand Kubernetes (K8S): Pod Resource Management and Harbor Setup


I. Pod Resource Management

Characteristics:

Smallest deployable unit in Kubernetes

A collection of one or more containers

Containers in the same Pod share the network namespace (see the sketch below)

Pods are ephemeral
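A minimal sketch of that shared network namespace (the Pod name, the busybox sidecar, and the wget command are illustrative and not from this cluster): the sidecar can reach nginx over 127.0.0.1 precisely because both containers live in the same Pod network namespace.

apiVersion: v1
kind: Pod
metadata:
  name: shared-netns-demo              # hypothetical name, for illustration only
spec:
  containers:
  - name: web
    image: nginx:1.14                  # listens on port 80 inside the Pod
  - name: sidecar
    image: busybox
    # wget against localhost works because both containers share the Pod's network namespace
    command: ["sh", "-c", "sleep 5; wget -qO- http://127.0.0.1:80; sleep 3600"]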

Pod container categories:

1: infrastructure container (the pause container)

// Maintains the network namespace for the entire Pod

// Performed on a node

// View the kubelet configuration that sets the infra (pause) container image

[root@node1 ~]# cat /opt/kubernetes/cfg/kubelet

KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.200.40 \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet.config \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

// A pause container is created every time a Pod is created; there is one per Pod, and it is transparent to the user

[root@node1 ~]# docker ps -a
bf3a30ca89ca   registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0   "/pause"                 45 hours ago   Up 45 hours                         k8s_POD_nginx-deployment-d55b94fd-gc2xv_default_14a6635c-9e86-11eb-8f82-000c29c8b18e_0
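A quick way to confirm on the node that an application container actually joins the pause container's network namespace is to inspect its NetworkMode (a sketch; <nginx-container-id> is a placeholder for the ID of the nginx container that belongs to the same Pod):

# On the node: the application container's network mode points at the pause container
docker inspect -f '{{.HostConfig.NetworkMode}}' <nginx-container-id>
# Expected output has the form: container:<pause-container-id>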

2: initContainers (init containers)

// Run and complete before the application containers start; originally all containers in a Pod started in parallel, and init containers were introduced as an improvement on that

3: containers (application/business containers)

// Started in parallel

Official documentation:

https://kubernetes.io/docs/concepts/workloads/pods/init-containers/

Example:
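Since the original screenshot is not available, here is a sketch along the lines of the example on the official page above: two init containers block until the Services myservice and mydb can be resolved, and only then does the application container start.

apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  labels:
    app: myapp
spec:
  initContainers:                      # run one after another, before the app container
  - name: init-myservice
    image: busybox
    command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
  - name: init-mydb
    image: busybox
    command: ['sh', '-c', 'until nslookup mydb; do echo waiting for mydb; sleep 2; done;']
  containers:                          # started only after all init containers succeed
  - name: myapp-container
    image: busybox
    command: ['sh', '-c', 'echo The app is running! && sleep 3600']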

Image pull policy (imagePullPolicy)

IfNotPresent: the default; the image is pulled only when it is not already present on the host (note: if the image tag is :latest or no tag is given, the effective default becomes Always)

Always: the image is re-pulled every time the Pod is created

Never: the Pod never actively pulls the image

https://kubernetes.io/docs/concepts/containers/images

Example:

// Performed on master01

[root@master1 ~]# kubectl edit deployment/nginx-deployment

# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: 2021-04-16T07:33:47Z
  generation: 1
  labels:
    app: nginx
  name: nginx-deployment
  namespace: default
  resourceVersion: "118322"
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/nginx-deployment
  uid: 14a14cdb-9e86-11eb-98d8-000c295d78bd
"/tmp/kubectl-edit-xuilj.yaml" 70L, 1960C

[root@master1 ~]# cd demo/
[root@master1 demo]# ls
my-deployment.yaml     nginx-service.yaml     nginx-deployment.yaml

[root@master1 demo]# vim pod1.yaml
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
    - name: nginx
      image: nginx
      imagePullPolicy: Always
      command: [ "echo", "SUCCESS" ]

[root@master1 demo]# kubectl create -f pod1.yaml 
pod/mypod created


[root@master1 demo]# kubectl get pods
NAME                              READY   STATUS      RESTARTS   AGE
mypod                             0/1     Completed   0          16s


[root@master1 demo]# kubectl describe pod mypod
Name:               mypod
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               192.168.200.60/192.168.200.60
Start Time:         Sun, 18 Apr 2021 12:35:17 +0800
Labels:             <none>
Annotations:        <none>
Status:             Running
IP:                 172.17.86.4
Containers:
  nginx:
    Container ID:  docker://0a293e5411624d3ac13ac53749787d518ecf104f8073075905dd7c12042c59ce
    Image:         nginx
    Image ID:      docker-pullable://nginx@sha256:75a55d33ecc73c2a242450a9f1cc858499d468f077ea942867e662c247b5e412
    Port:          <none>
    Host Port:     <none>
    Command:
      echo
      SUCCESS
    State:          Waiting
      Reason:       CrashLoopBackOff
    Last State:     Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Sun, 18 Apr 2021 12:36:48 +0800
      Finished:     Sun, 18 Apr 2021 12:36:48 +0800
    Ready:          False
    Restart Count:  3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-697gb (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  default-token-697gb:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-697gb
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                     Message
  ----     ------     ----                ----                     -------
  Normal   Scheduled  112s                default-scheduler        Successfully assigned default/mypod to 192.168.200.60
  Normal   Pulling    40s (x4 over 112s)  kubelet, 192.168.200.60  pulling image "nginx"
  Normal   Pulled     24s (x4 over 111s)  kubelet, 192.168.200.60  Successfully pulled image "nginx"
  Normal   Created    24s (x4 over 111s)  kubelet, 192.168.200.60  Created container
  Normal   Started    24s (x4 over 111s)  kubelet, 192.168.200.60  Started container
  Warning  BackOff    12s (x6 over 94s)   kubelet, 192.168.200.60  Back-off restarting failed container
  
  
[root@master1 demo]# kubectl get pods
NAME                              READY   STATUS             RESTARTS   AGE
mypod                             0/1     CrashLoopBackOff   5          4m14s
nginx-deployment-d55b94fd-gc2xv   1/1     Running            0          45h
nginx-deployment-d55b94fd-wt5g7   1/1     Running            1          45h
nginx-deployment-d55b94fd-xwxsr   1/1     Running            0          45h

// The Pod keeps failing (CrashLoopBackOff) because the command conflicts with the container's normal startup: echo prints SUCCESS and exits immediately, so the kubelet restarts the container over and over

Delete the line command: [ "echo", "SUCCESS" ]

// Also change the image version

image: nginx:1.14

// Delete the existing resource

[root@master1 demo]# kubectl delete -f pod1.yaml 
pod "mypod" deleted
[root@master1 demo]# kubectl get pods
NAME                              READY   STATUS    RESTARTS   AGE
nginx-deployment-d55b94fd-gc2xv   1/1     Running   0          45h
nginx-deployment-d55b94fd-wt5g7   1/1     Running   1          45h
nginx-deployment-d55b94fd-xwxsr   1/1     Running   0          45h

// Update the resource

[root@master1 demo]# vim pod1.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
    - name: nginx
      image: nginx:1.14
      imagePullPolicy: Always


[root@master1 demo]#  kubectl apply -f pod1.yaml
pod/mypod created

[root@master1 demo]# kubectl get pods
NAME                              READY   STATUS    RESTARTS   AGE
mypod                             1/1     Running   0          18s
nginx-deployment-d55b94fd-gc2xv   1/1     Running   0          45h
nginx-deployment-d55b94fd-wt5g7   1/1     Running   1          45h
nginx-deployment-d55b94fd-xwxsr   1/1     Running   0          45h

// Check which node the Pod was scheduled to

[root@master1 demo]#  kubectl get pods -o wide
NAME                              READY   STATUS    RESTARTS   AGE     IP            NODE             NOMINATED NODE
mypod                             1/1     Running   0          2m29s   172.17.86.4   192.168.200.60   <none>
nginx-deployment-d55b94fd-gc2xv   1/1     Running   0          45h     172.17.63.3   192.168.200.40   <none>
nginx-deployment-d55b94fd-wt5g7   1/1     Running   1          45h     172.17.86.3   192.168.200.60   <none>
nginx-deployment-d55b94fd-xwxsr   1/1     Running   0          45h     172.17.63.2   192.168.200.40   <none>

// On any node, use curl to check the response

// Performed on a node

[root@node2 ~]# curl 172.17.86.4
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
......

II. Deploying Harbor and Creating a Private Project

1. First, install the Docker engine

yum -y install yum-utils device-mapper-persistent-data lvm2

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum makecache fast
yum -y install docker-ce docker-ce-cli containerd.io 

systemctl start docker
systemctl enable docker

tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://p3pqujwc.mirror.aliyuncs.com"]
}
EOF

systemctl daemon-reload
systemctl restart docker

echo 'net.ipv4.ip_forward=1' >> /etc/sysctl.conf

sysctl -p

service network restart
systemctl restart docker

// Install docker-compose and check its version to confirm the installation succeeded

[root@harbor ~]# cd /usr/local/bin/
[root@harbor bin]# rz

[root@harbor bin]# ls
docker-compose

chmod +x /usr/local/bin/docker-compose
docker-compose -v
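If you prefer not to upload the binary with rz, docker-compose can also be fetched directly (a sketch; the GitHub release URL and version 1.21.1 are assumptions, pick whichever release you actually need):

# Download a docker-compose release instead of uploading it with rz
curl -L "https://github.com/docker/compose/releases/download/1.21.1/docker-compose-$(uname -s)-$(uname -m)" \
  -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v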

// Download the Harbor offline installer

wget http://harbor.orientsoft.cn/harbor-1.2.2/harbor-offline-installer-v1.2.2.tgz

tar zxvf harbor-offline-installer-v1.2.2.tgz -C /usr/local/

// Configure the Harbor parameter file

vim /usr/local/harbor/harbor.cfg

hostname = 192.168.200.90     # line 5: change this to the host's IP
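A couple of other harbor.cfg settings worth checking at the same time (key names are from memory of this Harbor release, so verify them against your own file):

ui_url_protocol = http                  # keep http, since no certificates are configured here
harbor_admin_password = Harbor12345     # default admin password used below; change it for production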

// Start Harbor

sh /usr/local/harbor/install.sh

// If everything is working, you should be able to open the management page at http://192.168.200.90 in a browser; the default administrator username and password are admin/Harbor12345.
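A quick sanity check after install.sh finishes (a sketch; install.sh generates a docker-compose.yml under /usr/local/harbor, so the Harbor containers can be listed from there):

cd /usr/local/harbor
docker-compose ps                     # every Harbor container should show State "Up"
curl -I http://192.168.200.90         # the UI should answer with an HTTP response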

2. Create a new private project in Harbor

In the Harbor web UI, create a new project (named myproject-gcc in this walkthrough) and keep it private.

3. Configure the nodes to connect to the private registry (note: remember to add the comma at the end of the existing line)

[root@node1 ~]# vim /etc/docker/daemon.json
{
  "registry-mirrors": ["https://p3pqujwc.mirror.aliyuncs.com"],
   "insecure-registries":["192.168.200.90"]
}

[root@node1 ~]# systemctl restart docker.service 
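To confirm the change was picked up after the restart, the registry should now appear under Docker's insecure registries (a sketch; the grep context width may need adjusting):

[root@node1 ~]# docker info 2>/dev/null | grep -A 2 -i "insecure registries"
# 192.168.200.90 should be listed in the output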

4. Log in to the Harbor private registry

[root@node1 ~]#  docker login 192.168.200.90
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

5. Pull the Tomcat image and push it to Harbor

[root@node1 ~]#  docker pull tomcat:8.0.52

// Tag/push format

docker tag SOURCE_IMAGE[:TAG] 192.168.200.90/project/IMAGE[:TAG]


// Tag the image

[root@node1 ~]# docker tag tomcat:8.0.52 192.168.200.90/myproject-gcc/tomcat:v1

// Push the image (this should succeed)

[root@node1 ~]# docker push 192.168.200.90/myproject-gcc/tomcat:v1 


6. node2 also needs to log in to Harbor

[root@node2 ~]# docker login 192.168.200.90
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

7. Create the credential resource on master01

First, view the credential on a node

[root@node1 ~]# cd .docker/
[root@node1 .docker]# ls
config.json

[root@node1 .docker]# cat config.json | base64 -w 0
ewoJImF1dGhzIjogewoJCSIxOTIuMTY4LjIwMC45MCI6IHsKCQkJImF1dGgiOiAiWVdSdGFXNDZTR0Z5WW05eU1USXpORFU9IgoJCX0KCX0KfQ==
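For reference, that long string is simply config.json re-encoded in base64; its inner auth field decodes to the admin credentials used for docker login above (verifiable with base64 -d):

[root@node1 .docker]# echo 'YWRtaW46SGFyYm9yMTIzNDU=' | base64 -d
admin:Harbor12345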

Then create the credential resource on master01

[root@master1 demo]# vim registry-pull-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: registry-pull-secret
data:
  .dockerconfigjson: ewoJImF1dGhzIjogewoJCSIxOTIuMTY4LjIwMC45MCI6IHsKCQkJImF1dGgiOiAiWVdSdGFXNDZTR0Z5WW05eU1USXpORFU9IgoJCX0KCX0KfQ==
type: kubernetes.io/dockerconfigjson
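An equivalent and less error-prone way to produce the same Secret is to let kubectl assemble the dockerconfigjson itself (a sketch; Harbor12345 is the default password from the install above, and the email is only a placeholder that some older kubectl versions insist on):

kubectl create secret docker-registry registry-pull-secret \
  --docker-server=192.168.200.90 \
  --docker-username=admin \
  --docker-password=Harbor12345 \
  --docker-email=admin@example.com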

8. Create the Secret resource

[root@master1 demo]# kubectl create -f registry-pull-secret.yaml

9. View the Secret resource

[root@master1 demo]# kubectl get secret
NAME                   TYPE                                  DATA   AGE
default-token-697gb    kubernetes.io/service-account-token   3      4d19h
registry-pull-secret   kubernetes.io/dockerconfigjson        1      31s

10. Create resources that pull the image from Harbor

[root@master1 demo]#  vim tomcat-deployment.yaml

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: my-tomcat
spec:
  replicas: 2
  template:
    metadata:
      labels:
        app: my-tomcat
    spec:
      imagePullSecrets:
      - name: registry-pull-secret
      containers:
      - name: my-tomcat
        image: 192.168.200.90/myproject-gcc/tomcat:v1
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: my-tomcat
spec:
  type: NodePort
  ports:
  - port: 8080
    targetPort: 8080
    nodePort: 31111
  selector:
    app: my-tomcat

[root@master1 demo]# kubectl create -f tomcat-deployment.yaml
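Once both objects exist, the NodePort mapping can be checked and Tomcat reached on either node's IP at port 31111 (a sketch; output omitted):

kubectl get svc my-tomcat                 # should show TYPE NodePort and PORT(S) 8080:31111/TCP
curl -I http://192.168.200.40:31111/      # or http://192.168.200.60:31111/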

11. The image in the private registry has now been pulled twice

[root@master1 demo]# kubectl get pods
NAME                              READY   STATUS    RESTARTS   AGE
my-tomcat-5674fb9b47-7z9qn        1/1     Running   0          100s
my-tomcat-5674fb9b47-hxnjc        1/1     Running   0          100s
mypod                             1/1     Running   1          59m
nginx-deployment-d55b94fd-gc2xv   1/1     Running   1          46h
nginx-deployment-d55b94fd-wt5g7   1/1     Running   2          46h
nginx-deployment-d55b94fd-xwxsr   1/1     Running   1          46h


[root@master1 demo]# kubectl describe pod my-tomcat-5674fb9b47-7z9qn
Name:               my-tomcat-5674fb9b47-7z9qn
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               192.168.200.40/192.168.200.40
Start Time:         Sun, 18 Apr 2021 13:41:12 +0800
Labels:             app=my-tomcat
                    pod-template-hash=5674fb9b47
Annotations:        <none>
Status:             Running
IP:                 172.17.92.4
Controlled By:      ReplicaSet/my-tomcat-5674fb9b47
Containers:
  my-tomcat:
    Container ID:   docker://26488fbe8429cf6dda3e78fd2a87b66391fece4fa882c1ed67082add02524f21
    Image:          192.168.200.90/myproject-gcc/tomcat:v1
    Image ID:       docker-pullable://192.168.200.90/myproject-gcc/tomcat@sha256:f3cfaf433cb95dafca20143ba99943249ab830d0aca484c89ffa36cf2a9fb4c9
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Sun, 18 Apr 2021 13:41:14 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-697gb (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  default-token-697gb:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-697gb
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason     Age    From                     Message
  ----    ------     ----   ----                     -------
  Normal  Scheduled  2m52s  default-scheduler        Successfully assigned default/my-tomcat-5674fb9b47-7z9qn to 192.168.200.40
  Normal  Pulled     2m50s  kubelet, 192.168.200.40  Container image "192.168.200.90/myproject-gcc/tomcat:v1" already present on machine
  Normal  Created    2m50s  kubelet, 192.168.200.40  Created container
  Normal  Started    2m50s  kubelet, 192.168.200.40  Started container

