【docker】【kubernetes】K8S Deployment and Web Dashboard

VM Setup
First, some preparatory work.

Environment Variables, Aliases, etc.
Install common utilities
yum -y install tree             # install tree
yum -y install vim              # install vim
yum -y install net-tools.x86_64 # install ifconfig
Show line numbers in vim
echo "set number" >> /etc/vimrc
source /etc/vimrc
Environment variables
vi /etc/profile
# export
export PS1='[\u@\h \w]\$ '
# alias
alias cr7='chmod 777 * -R'
alias sep='source /etc/profile'
alias vi='vim'

source /etc/profile
Install K8S
Prepare the installation environment
# 1. Prepare the installation environment
# 1.1 Install the epel-release repository
yum -y install epel-release
# 1.2 Turn off the firewall and SELinux
systemctl stop firewalld       # stop the firewall
systemctl disable firewalld    # keep it from starting at boot
setenforce 0                   # switch SELinux to permissive mode
firewall-cmd --state           # check the firewall status
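Note that setenforce 0 only keeps SELinux permissive until the next reboot; to make the change permanent, the config file needs the matching edit (a standard CentOS 7 step, shown here for convenience):

sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config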
Master node
# 2. Master node
# 2.1 Install etcd and kubernetes-master
yum -y install etcd kubernetes-master
# 2.2 Edit the etcd.conf file
vi /etc/etcd/etcd.conf
# result as in Figure 1 (nothing actually changed... the defaults were already right)
# 2.3 Edit the apiserver file
vi /etc/kubernetes/apiserver
# result as in Figure 2 (two edits)
# 2.4 Start etcd, kube-apiserver, kube-controller-manager and kube-scheduler, and enable them at boot
for SERVICES in etcd kube-apiserver kube-controller-manager kube-scheduler; do systemctl restart $SERVICES; systemctl enable $SERVICES; systemctl status $SERVICES; done
# 2.5 Define the flannel network in etcd
etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'
Figure 1: /etc/etcd/etcd.conf (defaults kept as-is)

Figure 2: /etc/kubernetes/apiserver (two edits)
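The screenshots are not reproduced here. For this CentOS package layout, a sketch of what Figure 2 most likely showed (an assumption based on the standard /etc/kubernetes/apiserver shipped with kubernetes-master, not a copy of the original screenshot):

# /etc/kubernetes/apiserver
# edit 1: listen on all interfaces instead of 127.0.0.1 only
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
# edit 2: drop ServiceAccount from the admission controllers so pods can start
# without service-account tokens
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"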
Worker (node) machines
# 3. Worker node part
# 3.1 On each node machine, install the kubernetes-node and flannel components
yum -y install flannel kubernetes-node
# If this fails, remove the conflicting packages first
# yum -y remove docker-ce
# yum -y remove docker-ce-cli
# 3.2 Point the flannel network at the etcd service: edit /etc/sysconfig/flanneld (result as in Figure 3)
vi /etc/sysconfig/flanneld
# change the IP to the master's IP, 10.10.108.164
# 3.3 Edit /etc/kubernetes/config (result as in Figure 4)
vi /etc/kubernetes/config
# again a single edit: change the IP to the master's IP, 10.10.108.164
# 3.4 Edit the node's kubelet configuration /etc/kubernetes/kubelet (result as in Figure 5)
vi /etc/kubernetes/kubelet
# four edit points; this node's own IP is 10.10.108.165
# 3.5 On the node, start kube-proxy, kubelet, docker and flanneld, and enable them at boot
for SERVICES in kube-proxy kubelet docker flanneld; do systemctl restart $SERVICES; systemctl enable $SERVICES; systemctl status $SERVICES; done
Figure 3: /etc/sysconfig/flanneld (etcd endpoint pointed at the master)

Figure 4: /etc/kubernetes/config (KUBE_MASTER pointed at the master)

Figure 5: /etc/kubernetes/kubelet (four edits)
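Again the screenshots are not reproduced; a sketch of the likely edits, assuming the master is 10.10.108.164 and this node is 10.10.108.165 (both IPs taken from the text above) and that the apiserver listens on the default insecure port 8080:

# /etc/sysconfig/flanneld -- Figure 3: point flannel at the master's etcd
FLANNEL_ETCD_ENDPOINTS="http://10.10.108.164:2379"

# /etc/kubernetes/config -- Figure 4: point the node at the master
KUBE_MASTER="--master=http://10.10.108.164:8080"

# /etc/kubernetes/kubelet -- Figure 5: the four edit points
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=10.10.108.165"
KUBELET_API_SERVER="--api-servers=http://10.10.108.164:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"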
Verify the installation
# run the following on the master
kubectl get nodes

# a successful run looks like this
[root@h164 home]# kubectl get nodes
NAME            STATUS    AGE
10.10.108.165   Ready     7h
10.10.108.166   Ready     44m
10.10.108.167   Ready     35s
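The control-plane components can be checked the same way (an optional sanity check):

# still on the master
kubectl get componentstatuses   # scheduler, controller-manager and etcd should all report Healthy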
Deploy the Web Dashboard on the Master
Create the deployment resource configuration file
vi dashboard-deployment.yaml

kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/tolerations: |
          [
            {
              "key": "dedicated",
              "operator": "Equal",
              "value": "master",
              "effect": "NoSchedule"
            }
          ]
    spec:
      containers:
      - name: kubernetes-dashboard
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3
        ports:
        - containerPort: 9090
          protocol: TCP
        args:
        # tell the dashboard where the apiserver is; the address below is this
        # guide's master IP (assuming the default insecure port 8080)
        - --apiserver-host=http://10.10.108.164:8080
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30

Create the service configuration file
vi dashboard-service.yaml

apiVersion: v1  # API version
kind: Service   # resource type
metadata:       # resource metadata
  name: kubernetes-dashboard
  namespace: kube-system  # must match the namespace of the deployment
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:  # the service definition itself
  type: NodePort  # required here; without it the nodePort field below is rejected
  selector:  # label selector; matches the label unique to the dashboard deployment's pods
    k8s-app: kubernetes-dashboard
  ports:  # ports the service exposes
    - port: 80         # port for access from inside the cluster
      nodePort: 30303  # port opened on every node for external access
      targetPort: 9090 # port of the service inside the pod
Prepare the images
# All nodes may pre-pull the relevant images, though skipping this is fine since
# they can also be pulled automatically: registry.access.redhat.com/rhel7/pod-infrastructure:latest
# and docker.io/bestwu/kubernetes-dashboard-amd64:v1.6.3. Since k8s is already in use,
# pod-infrastructure exists and is not pulled again.
docker pull docker.io/bestwu/kubernetes-dashboard-amd64:v1.6.3
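Note that dashboard-deployment.yaml references the image by its gcr.io name. If the nodes cannot pull from gcr.io, the mirror pulled above can be retagged so the kubelet finds the image locally (with the default imagePullPolicy of IfNotPresent for a fixed tag, the local copy is used):

docker tag docker.io/bestwu/kubernetes-dashboard-amd64:v1.6.3 \
    gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3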
Start the dashboard deployment and service
kubectl create -f dashboard-deployment.yaml
kubectl create -f dashboard-service.yaml
Delete the dashboard
kubectl delete -f dashboard-deployment.yaml
kubectl delete -f dashboard-service.yaml
Check the dashboard status
kubectl get service --namespace=kube-system
kubectl get pod --namespace=kube-system -o wide
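If the pod is Running, the dashboard should also answer on the NodePort from outside the cluster; a quick check against one of the nodes listed earlier:

curl -I http://10.10.108.165:30303/   # expect an HTTP 200 response from the dashboard

or simply open http://<node-ip>:30303/ in a browser.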
Another way to expose an external IP
Instead of a NodePort service, an nginx ingress controller can expose the cluster. The combined manifest below creates the namespace, ConfigMaps, RBAC objects, the controller Deployment (hostNetwork: true, so it binds ports 80/443 directly on its node), its Service, a status Ingress, and a default HTTP backend:
#--------------------- namespaces -----------------------#
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
---
#--------------------- ConfigMap--------------------------#
kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app: ingress-nginx
---

kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
---

kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx
---

apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf
  namespace: ingress-nginx
data:
  enable-vts-status: "true"
---
#---------------------------rbac--------------------------#
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
        - events
    verbs:
        - create
        - patch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses/status
    verbs:
      - update

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx
---
#------------------------nginx-controller-----------------#
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ingress-nginx
  template:
    metadata:
      labels:
        app: ingress-nginx
      annotations:
        prometheus.io/port: '10254'
        prometheus.io/scrape: 'true'
    spec:
      serviceAccountName: nginx-ingress-serviceaccount
      hostNetwork: true
      containers:
        - name: nginx-ingress-controller
          image: quay-mirror.qiniu.com/kubernetes-ingress-controller/nginx-ingress-controller:0.15.0
          args:
            - /nginx-ingress-controller
            - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --publish-service=$(POD_NAMESPACE)/nginx-ingress-controller
            - --annotations-prefix=nginx.ingress.kubernetes.io
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
          - name: http
            containerPort: 80
          - name: https
            containerPort: 443
          - containerPort: 8080
            hostPort: 8080
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          securityContext:
            runAsNonRoot: false
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    name: http
  - port: 443
    protocol: TCP
    targetPort: 443
    name: https
  - port: 8080
    protocol: TCP
    name: nginx-status
  selector:
    app: ingress-nginx
  sessionAffinity: None
  type: ClusterIP

---

kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: nginx-status-ingress
  namespace: ingress-nginx
spec:
  rules:
  - host: nginx-ui.local
    http:
      paths:
      - path:
        backend:
          serviceName: nginx-ingress-controller
          servicePort: 8080
---
#-----------------------default-http-backend--------------#
apiVersion: v1
kind: ReplicationController
metadata:
  name: default-http-backend
  namespace: ingress-nginx
spec:
  replicas: 1
  selector:
    app: default-http-backend
  template:
    metadata:
      labels:
        app: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        # Any image is permissible as long as:
        # 1. It serves a 404 page at /
        # 2. It serves 200 on a /healthz endpoint
        image: reg.qiniu.com/k8s/defaultbackend-amd64:1.4
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---

apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: ingress-nginx
  labels:
    app: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app: default-http-backend
---
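After applying the manifest (for example kubectl create -f ingress-nginx.yaml; the file name is arbitrary), the dashboard can be exposed through an Ingress rule. A minimal sketch, assuming the hostname dashboard.local resolves to the controller node's IP (e.g. via /etc/hosts):

kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: dashboard-ingress
  namespace: kube-system  # an Ingress can only reference Services in its own
                          # namespace, and the dashboard Service lives in kube-system
spec:
  rules:
  - host: dashboard.local
    http:
      paths:
      - path: /
        backend:
          serviceName: kubernetes-dashboard
          servicePort: 80

With that in place, http://dashboard.local/ reaches the dashboard through the ingress controller instead of the NodePort.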

————————————————
Copyright notice: This is an original article by CSDN blogger 「万独孤」, licensed under CC 4.0 BY-SA. Please include the original link and this notice when reposting.
Original link: https://blog.csdn.net/wandugu/article/details/106609206
