k8s使用yaml文件创建haproxy

web镜像制作

[root@master ~]# cd httpd/
[root@master httpd]# ls
Dockerfile
[root@master httpd]# vim Dockerfile 
[root@master httpd]# cat Dockerfile 
# Minimal static web server image, version v1.
FROM busybox

# Create the web root and drop in a version-stamped index page.
RUN mkdir  /data && \
    echo "test page on v1" > /data/index.html
# busybox httpd: -f = stay in foreground (required for containers), -h = web root.
ENTRYPOINT ["/bin/httpd","-f","-h","/data"]

# Build the v1 image; the trailing "." is the build context (the current
# directory containing the Dockerfile) — without it docker build fails with
# "requires exactly 1 argument".
docker build -t jiejiehao/httpd:v1 .

[root@master httpd]# vim Dockerfile 
[root@master httpd]# cat Dockerfile 
# Minimal static web server image, version v2 (only the page text differs from v1).
FROM busybox

# Create the web root and drop in a version-stamped index page.
RUN mkdir  /data && \
    echo "test page on v2" > /data/index.html
# busybox httpd: -f = stay in foreground (required for containers), -h = web root.
ENTRYPOINT ["/bin/httpd","-f","-h","/data"]

# Build the v2 image; the trailing "." supplies the required build context.
docker build -t jiejiehao/httpd:v2 .

[root@master httpd]# docker images
REPOSITORY                                                        TAG        IMAGE ID       CREATED         SIZE
jiejiehao/httpd                                                   v1         477d3c5c8fe7   26 hours ago    1.24MB
jiejiehao/httpd                                                   v2         7236b2280632   26 hours ago    1.24MB
jiejiehao/haproxy                                                 v3         e305401a54a4   9 days ago      54.1MB

web01

[root@master ~]# cat web01.yaml 
---
# Deployment for backend web1: one replica serving the v1 test page.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web1
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web1
  template:
    metadata:
      labels:
        app: web1
    spec:
      containers:
      - name: web1
        image: jiejiehao/httpd:v1
        # Fixed: original said "magePullPolicy" (typo) — an unknown field,
        # so the intended pull policy was never applied.
        imagePullPolicy: IfNotPresent
---
# NodePort Service exposing web1 on port 80 inside the cluster
# and on a random node port externally.
apiVersion: v1
kind: Service
metadata:
  name: web1
  namespace: default
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: web1
  type: NodePort


web02

[root@master ~]# cat web02.yaml 
---
# Deployment for backend web2: one replica serving the v2 test page.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web2
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web2
  template:
    metadata:
      labels:
        app: web2
    spec:
      containers:
      - name: httpd
        image: jiejiehao/httpd:v2
        # Fixed: original said "magePullPolicy" (typo) — an unknown field,
        # so the intended pull policy was never applied.
        imagePullPolicy: IfNotPresent
---
# NodePort Service exposing web2 on port 80 inside the cluster
# and on a random node port externally.
apiVersion: v1
kind: Service
metadata:
  name: web2
  namespace: default
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: web2
  type: NodePort

创建web的Service

[root@master ~]# kubectl create -f web01.yaml 
deployment.apps/web1 created
service/web1 created

[root@master ~]# kubectl create -f web02.yaml 
deployment.apps/web2 created
service/web2 created


[root@master ~]# kubectl get pod,svc
NAME                             READY   STATUS    RESTARTS   AGE
pod/web1-99dd54ccd-c5bff         1/1     Running   0          35s
pod/web2-d9c9695cf-4gw66         1/1     Running   0          26s

NAME                 TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
service/web1         NodePort    10.111.185.44    <none>        80:32001/TCP   35s
service/web2         NodePort    10.107.126.79    <none>        80:30515/TCP   26s

给node1节点添加一个标签

[root@master ~]# kubectl get node
NAME     STATUS   ROLES                  AGE    VERSION
master   Ready    control-plane,master   4d2h   v1.20.0
node1    Ready    <none>                 4d2h   v1.20.0
node2    Ready    <none>                 4d2h   v1.20.0
[root@master ~]# kubectl label node node1 disk=haproxy
node/node1 labeled


haproxy.yaml
haproxy镜像制作方法

[root@master ~]# cat haproxy.yaml 
---
# Deployment for the haproxy load balancer fronting web1/web2.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: haproxy
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: haproxy
  template:  
    metadata:
      labels:
        app: haproxy
    spec:
      # Pin the pod to the node labeled disk=haproxy (node1), because the
      # config is read from a hostPath directory that only exists there.
      nodeSelector:
          disk: haproxy
      containers:
      - name: haproxy
        image: jiejiehao/haproxy:v3
        imagePullPolicy: IfNotPresent
        volumeMounts:
        # /root/haproxy_config on the node appears as /tmp in the container;
        # the image presumably reads backend names from /tmp/RSs.txt — confirm
        # against the image's entrypoint script.
        - name: data
          mountPath: /tmp
      volumes:
      - name: data
        hostPath:
          path: /root/haproxy_config
          type: Directory

在node1节点上创建一个目录

[root@node1 haproxy_config]# pwd
/root/haproxy_config
[root@node1 haproxy_config]# vim RSs.txt 
[root@node1 haproxy_config]# cat RSs.txt 
web1
web2

创建haproxy的pod

[root@master ~]# kubectl create  -f haproxy.yaml 
deployment.apps/haproxy created
[root@master ~]# kubectl get pod -o wide
NAME                         READY   STATUS    RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
haproxy-b7f6cbf55-c2kf2      1/1     Running   0          23s     10.244.1.49   node1   <none>           <none>
web1-99dd54ccd-c5bff         1/1     Running   0          3m18s   10.244.2.41   node2   <none>           <none>
web2-d9c9695cf-4gw66         1/1     Running   0          3m9s    10.244.2.42   node2   <none>           <none>


访问测试

[root@master ~]# curl 10.244.1.49
test page on v1
[root@master ~]# curl 10.244.1.49
test page on v2
[root@master ~]# curl 10.244.1.49
test page on v1
[root@master ~]# curl 10.244.1.49
test page on v2


[root@master ~]# kubectl exec haproxy-b7f6cbf55-c2kf2 -- cat /tmp/RSs.txt 
web1
web2
[root@master ~]# kubectl exec haproxy-b7f6cbf55-c2kf2 -- cat /usr/local/haproxy/conf/haproxy.cfg
#-------------- Global settings ----------------
global
    log 127.0.0.1 local0  info
    #log loghost local0 info
    maxconn 20480
#chroot /usr/local/haproxy
    pidfile /var/run/haproxy.pid
    #maxconn 4000
    user haproxy
    group haproxy
    daemon
#---------------------------------------------------------------------
#common defaults that all the 'listen' and 'backend' sections will
#use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode http
    log global
    option dontlognull
    option httpclose
    option httplog
    #option forwardfor
    option redispatch
    balance roundrobin
    timeout connect 10s
    timeout client 10s
    timeout server 10s
    timeout check 10s
    maxconn 60000
    retries 3
#-------------- Stats page settings ------------------
listen admin_stats
    bind 0.0.0.0:8189
    stats enable
    mode http
    log global
    stats uri /haproxy_stats
    stats realm Haproxy\ Statistics
    stats auth admin:admin
    #stats hide-version
    stats admin if TRUE
    stats refresh 30s
#--------------- Web backend settings -----------------------
listen webcluster
    bind 0.0.0.0:80
    mode http
    #option httpchk GET /index.html
    log global
    maxconn 3000
    balance roundrobin
    cookie SESSION_COOKIE insert indirect nocache
    # Backends are addressed by Service name (web1/web2), resolved via
    # cluster DNS; round-robin explains the alternating v1/v2 curl output.
    server web1 web1:80 check inter 2000 fall 5
    server web2 web2:80 check inter 2000 fall 5

Service的端口号暴露写法

---
apiVersion: v1
kind: Service
metadata:
  name: kube-node-service
  labels:
    name: kube-node-service
spec:
  type: NodePort      # This is a NodePort-type Service.
  ports:
  - port: 80          # Port bound to the ClusterIP (e.g. 10.97.114.36:80), for in-cluster access.
    targetPort: 8081  # Must match the port the container exposes; the nodejs app listens on 8081, so use 8081 here.
    protocol: TCP
    nodePort: 32143   # Opened on every node; this is the port external clients use.
  selector:
    app: web          # The selector must match the Pod's labels; the earlier "name: kube-node" was wrong.
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值