A web cluster based on Docker and Kubernetes

Start by drawing up a plan of the project topology.

On every server, do the following

Configure the appropriate IP address

For reference, see the author's CSDN post on static IP configuration for Linux (Linux静态配置ip地址, by 诗与远方lp).

Disable the firewall and SELinux

systemctl stop firewalld

systemctl disable firewalld

setenforce 0

sed -i '/^SELINUX=/ s/enforcing/disabled/' /etc/selinux/config
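A quick sanity check that both are really off (expected output noted as comments):

systemctl is-active firewalld   # should print inactive (or unknown)
getenforce                      # Permissive now; Disabled after a reboot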

Install nginx and keepalived on the load-balancer servers

Write a script to install nginx:

#!/bin/bash
# resolve build dependencies
yum install epel-release -y
yum -y install openssl openssl-devel pcre pcre-devel gcc gcc-c++ automake make
# create a directory to hold the nginx source package
mkdir -p /lp_nginx
cd /lp_nginx
# download nginx
curl -O http://nginx.org/download/nginx-1.23.4.tar.gz
# unpack
tar xf nginx-1.23.4.tar.gz
cd nginx-1.23.4
./configure --prefix=/usr/local/sclp --user=liaopeng --with-http_ssl_module --with-http_v2_module --with-threads --with-http_stub_status_module --with-stream
make -j 1
# install
make install
# start nginx
/usr/local/sclp/sbin/nginx
# add nginx to PATH (single quotes so $PATH is expanded at login, not now)
PATH=$PATH:/usr/local/sclp/sbin
echo 'PATH=$PATH:/usr/local/sclp/sbin' >>/root/.bashrc
# start nginx on boot
echo "/usr/local/sclp/sbin/nginx" >>/etc/rc.local
chmod +x /etc/rc.d/rc.local
# disable the firewall and SELinux, and flush iptables rules
systemctl stop firewalld
systemctl disable firewalld
iptables -F
setenforce 0
sed -i '/^SELINUX=/ s/enforcing/disabled/' /etc/selinux/config
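Assuming the script is saved as onekey_install_nginx.sh (the filename is my own), run it and verify the build:

bash onekey_install_nginx.sh
/usr/local/sclp/sbin/nginx -v   # prints the compiled nginx version
curl -I http://127.0.0.1        # a fresh install should answer HTTP/1.1 200 OK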


Install keepalived

yum install keepalived -y

Edit the nginx.conf file:

http {
    include mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $http_x_real_ip - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log logs/access.log main;
    sendfile on;
    #tcp_nopush on;
    #keepalive_timeout 0;
    keepalive_timeout 65;
    #gzip on;

    limit_conn_zone $binary_remote_addr zone=addr:10m;

    upstream lpapp1 {
        least_conn;
        server 192.168.1.50:30008;
        server 192.168.1.28:30008;
        server 192.168.1.29:30008;
    }

    server {
        listen 80;
        server_name www.liaopeng.com;
        access_log logs/peng.com.access.log main;
        #charset koi8-r;
        #access_log logs/host.access.log main;

        location / {
            root html;
            proxy_pass http://lpapp1;
            proxy_set_header X-Real-IP $remote_addr;
        }
    }
}
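Before reloading, it is worth validating the edited file; nginx provides both commands:

/usr/local/sclp/sbin/nginx -t          # test the configuration syntax
/usr/local/sclp/sbin/nginx -s reload   # reload workers without dropping connections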


Edit the keepalived.conf file.

loadbalance1:

global_defs {
    vrrp_skip_check_adv_addr
    #vrrp_strict
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}

vrrp_script chk_nginx {
    script "/nginx/check_nginx.sh"
    interval 1
    weight -40
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 60
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.1.100
    }
}

vrrp_instance VI_2 {
    state MASTER
    interface ens33
    virtual_router_id 61
    priority 120
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.1.90
    }
    track_script {
        chk_nginx
    }
}

loadbalance2:

global_defs {
    vrrp_skip_check_adv_addr
    #vrrp_strict
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}

vrrp_script chk_nginx {
    script "/nginx/check_nginx.sh"
    interval 1
    weight -40
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 60
    priority 120
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.1.100
    }
    track_script {
        chk_nginx
    }
}

vrrp_instance VI_2 {
    state BACKUP
    interface ens33
    virtual_router_id 61
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.1.90
    }
}

On each load balancer, create a script that checks whether nginx is running:

vim /nginx/check_nginx.sh

#!/bin/bash
# check whether nginx is running
if /usr/sbin/pidof nginx &>/dev/null; then
    exit 0
else
    exit 1
fi
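keepalived executes this script itself, so it must be executable; the exit code can also be tested by hand:

chmod +x /nginx/check_nginx.sh
/nginx/check_nginx.sh; echo $?   # 0 while nginx is running, 1 once it is stopped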

Start keepalived:

service keepalived start
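To see which balancer currently holds each VIP (interface name taken from the configs above):

ip addr show ens33 | grep -E '192\.168\.1\.(100|90)'
# loadbalance1 should hold 192.168.1.90 and loadbalance2 should hold 192.168.1.100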

Install Docker

Install Docker following the official documentation (Install Docker Engine on CentOS).

Remove any old versions and their associated dependencies:

yum remove docker \
           docker-client \
           docker-client-latest \
           docker-common \
           docker-latest \
           docker-latest-logrotate \
           docker-logrotate \
           docker-engine

Install the yum-utils package (which provides the utilities) and set up the repository:

yum install -y yum-utils

yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

Install Docker Engine, containerd, and Docker Compose:

yum install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

Start Docker:

service docker start

Install Kubernetes

Upgrade every upgradable package to improve system security:

yum update -y

Configure Docker to use systemd as its cgroup driver:

cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

Restart the Docker service:

systemctl restart docker
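Confirm the driver change took effect:

docker info | grep -i 'cgroup driver'   # should print: Cgroup Driver: systemd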

Disable the swap partition

Temporarily:

swapoff -a

Permanently:

vim /etc/fstab

Comment out the line  /dev/mapper/centos-swap swap  swap  defaults  0 0
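Verify that no swap is active, both now and after the next reboot:

swapon --show   # no output means no active swap
free -m         # the Swap line should read all zeros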

The /etc/hosts file must be updated on every machine:

cat >> /etc/hosts <<EOF
192.168.1.28 node1
192.168.1.29 node2
192.168.1.50 master
EOF

On every machine (master and nodes), make the kernel parameters permanent:

cat > /etc/sysctl.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF

Reload the kernel parameters so they take effect:

sysctl -p
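If sysctl -p complains that the net.bridge.* keys do not exist, the br_netfilter module is not loaded; loading it first is the usual fix on CentOS 7:

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # reload on every boot
sysctl -p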

Add the Kubernetes yum repository:

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install kubeadm, kubelet, and kubectl at a pinned version (from 1.24 on, the default container runtime is no longer Docker):

yum install -y kubelet-1.23.6 kubeadm-1.23.6 kubectl-1.23.6

Enable kubelet at boot:

systemctl enable kubelet

Prepare the images:

docker pull coredns/coredns:1.8.4

docker tag coredns/coredns:1.8.4 registry.aliyuncs.com/google_containers/coredns:v1.8.4

Initialize the cluster on the master:

kubeadm init \
  --apiserver-advertise-address=192.168.1.50 \
  --image-repository registry.aliyuncs.com/google_containers \
  --service-cidr=10.1.0.0/16 \
  --pod-network-cidr=10.244.0.0/16

Generate the kubectl configuration files:

mkdir -p $HOME/.kube

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

sudo chown $(id -u):$(id -g) $HOME/.kube/config

Join the node machines to the k8s cluster:

kubeadm join 192.168.1.50:6443 --token e3r0ro.fmun761p5zyiurgq \
    --discovery-token-ca-cert-hash sha256:beb10707b5bfd5a3c7a3e26d7de8df64c36bc7d99b50a946521bb4f297b2874a
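The token printed by kubeadm init expires after 24 hours; if a node joins later than that, a fresh join command can be generated on the master:

kubeadm token create --print-join-command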

Check the result with kubectl get nodes.

A NotReady status means the master and the nodes cannot communicate yet: the container network is not in place.

Install the calico network plugin

First fetch the calico.yml manifest, then apply it:

kubectl apply -f calico.yml
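Once the network plugin's pods come up, the nodes should flip from NotReady to Ready:

kubectl get pods -n kube-system   # wait for the network pods to reach Running
kubectl get nodes                 # master, node1 and node2 should all show Ready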

Set up the NFS service

Install the packages on the NFS server:

yum install  nfs-utils -y

Start the service:

systemctl start nfs

systemctl enable nfs   # start nfs automatically at boot

Check the nfs processes and the rpcbind port:

ps aux | grep nfs

ss -anplut | grep rpcbind

Create the NFS shared directory:

mkdir /web

cd /web

vim index.html

Edit the /etc/exports file:

/web 192.168.1.0/24(rw,sync,all_squash)

/web: the path of the shared directory

192.168.1.0/24: the network segment allowed to access it

(rw,sync,all_squash): the permissions. rw = read and write; sync = changes made on a client are written through to the NFS server synchronously; all_squash = every connecting user is mapped to the unprivileged nobody user

Make the export take effect:

exportfs -av

Set the ownership of the shared directory:

chown nfsnobody:nfsnobody /web
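showmount can confirm the export is visible, from the server itself or from any client:

showmount -e 192.168.1.128
# Export list for 192.168.1.128:
# /web 192.168.1.0/24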

The steps below are identical on every Docker host.

Install the client software:

yum install nfs-utils -y

Create the mount point on the client:

mkdir /nfs-web

cd /nfs-web

Mount the NFS server's /web onto the local /nfs-web:

mount 192.168.1.128:/web /nfs-web

Check that it is mounted:

df -Th|grep nfs
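This mount does not survive a reboot. To make it permanent, an fstab entry such as the following can be added (a sketch; adjust the options as needed):

echo '192.168.1.128:/web /nfs-web nfs defaults,_netdev 0 0' >> /etc/fstab
mount -a   # re-read fstab and mount everything listed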

Write an index.html in the /web directory on the NFS server.

On each Docker host, create a volume backed by the NFS server's shared /web directory:

docker volume create --driver local --opt type=nfs --opt o=addr=192.168.1.128,nolock,soft,rw,sync --opt device=:/web task-nfs

Inspect the volume's details:

docker volume inspect task-nfs

On the master, write a Kubernetes YAML manifest for a test:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: ngnginx-deployment
  labels:
    app: ngnginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: ngnginx
  template:
    metadata:
      labels:
        app: ngnginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/
          name: task-nfs
      volumes:
      - name: task-nfs
        hostPath:
          path: /nfs-web
          type: Directory
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: NodePort
  selector:
    app: ngnginx
  ports:
  - name: name-of-service-port
    protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30008
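Assuming the manifest is saved as nginx-nfs.yaml (the filename is my own), apply it and check that the pods start and the NodePort answers:

kubectl apply -f nginx-nfs.yaml
kubectl get pods -o wide         # three ngnginx replicas spread across the nodes
curl http://192.168.1.28:30008   # should return the index.html from the NFS share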

If the page served is the index.html from /web on the NFS server, the volume mount works.

Create the PV YAML file:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: sc-nginx-pv
  labels:
    type: sc-nginx-pv
spec:
  capacity:
    storage: 5Gi 
  accessModes:
    - ReadWriteMany
  storageClassName: nfs  
  nfs:
    path: "/web"       
    server: 192.168.1.128   
    readOnly: false

Create a PVC YAML file:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: sc-nginx-pvc
spec:
  accessModes:
  - ReadWriteMany      
  resources:
     requests:
       storage: 1Gi
  storageClassName: nfs 
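Apply both manifests and make sure the claim binds to the volume (the filenames are my own):

kubectl apply -f pv.yaml -f pvc.yaml
kubectl get pv,pvc   # sc-nginx-pv and sc-nginx-pvc should both show STATUS Bound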

Use a PersistentVolume (PV) and a PersistentVolumeClaim (PVC) to persist the data:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: ngnginx2
  labels:
    app: ngnginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: ngnginx
  template:
    metadata:
      labels:
        app: ngnginx
    spec:
      volumes:
      - name: sc-pv-storage-nfs
        persistentVolumeClaim:
          claimName: sc-nginx-pvc
      containers:
      - name: nginx
        image: nginx:1.14.2
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          name: "http-server"
        volumeMounts:
        - mountPath: /usr/share/nginx/html/
          name: sc-pv-storage-nfs

---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: NodePort
  selector:
    app: ngnginx
  ports:
  - name: name-of-service-port
    protocol: TCP
    port: 80
    targetPort: 80 
    nodePort: 30008

Add a taint to node1:

kubectl taint nodes node1 key1=value1:NoSchedule

Add a taint to node2 as well:

kubectl taint nodes node2 key2=value2:NoSchedule

Taints can then be used to steer pods onto the intended hosts.
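The taints can be listed per node to confirm they were applied:

kubectl describe node node1 | grep -i taints   # Taints: key1=value1:NoSchedule
kubectl describe node node2 | grep -i taints   # Taints: key2=value2:NoSchedule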

Add taint tolerations and a liveness probe to the YAML file:

# in the pod spec of the deployment that should land on node1:
      tolerations:
      - key: "key1"
        operator: "Equal"
        value: "value1"
        effect: "NoSchedule"
# in the pod spec of the deployment that should land on node2:
      tolerations:
      - key: "key2"
        operator: "Equal"
        value: "value2"
        effect: "NoSchedule"
# in the container spec, the probe and the command it watches:
        args:
        - /bin/sh
        - -c
        - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
        livenessProbe:
          exec:
            command:
            - cat
            - /tmp/healthy
          initialDelaySeconds: 5
          periodSeconds: 5

For testing, add two name-resolution entries to C:/Windows/System32/drivers/etc/hosts on the physical Windows machine.
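The original does not spell the two entries out; given the two VIPs defined in the keepalived configuration above, a plausible pair (an assumption, not taken from the source) is:

192.168.1.100 www.liaopeng.com
192.168.1.90 www.liaopeng.com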

From the physical machine, browse to www.liaopeng.com.

If the expected page appears, the web cluster has been built successfully.

Start Prometheus with Compose

Write a prometheus.yml that docker-compose.yml will mount as the Prometheus configuration:

scrape_configs:
- job_name: cadvisor
  scrape_interval: 5s
  static_configs:
  - targets:
    - cadvisor:8080
- job_name: load balancer1
  scrape_interval: 5s
  static_configs:
  - targets:
    - 192.168.1.27:9100
- job_name: load balancer2
  scrape_interval: 5s
  static_configs:
  - targets:
    - 192.168.1.30:9100
- job_name: master
  scrape_interval: 5s
  static_configs:
  - targets:
    - 192.168.1.50:9100
- job_name: node1
  scrape_interval: 5s
  static_configs:
  - targets:
    - 192.168.1.28:9100
- job_name: node2
  scrape_interval: 5s
  static_configs:
  - targets:
    - 192.168.1.29:9100

Write a docker-compose.yml file:

version: '3.2'
services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    ports:
    - 9090:9090
    command:
    - --config.file=/etc/prometheus/prometheus.yml
    volumes:
    - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
    depends_on:
    - cadvisor
  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    container_name: cadvisor
    ports:
    - 8080:8080
    volumes:
    - /:/rootfs:ro
    - /var/run:/var/run:rw
    - /sys:/sys:ro
    - /var/lib/docker/:/var/lib/docker:ro
    depends_on:
    - redis
  redis:
    image: redis:latest
    container_name: redis
    ports:
    - 6379:6379
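From the directory holding both files, bring the stack up and confirm all three containers run:

docker compose up -d   # or docker-compose up -d on older installs
docker ps              # prometheus, cadvisor and redis should all be Up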

On every monitored host:

mkdir /node-exporter

cd /node-exporter

Download node_exporter from the official site and transfer it to /node-exporter on the VM.

Unpack the node_exporter archive and put its directory on the PATH:

tar xf node_exporter-1.5.0.linux-amd64.tar.gz

PATH=/node-exporter/node_exporter-1.5.0.linux-amd64:$PATH

Start node_exporter:

nohup node_exporter --web.listen-address="0.0.0.0:9100" &

Visit port 9100 on the host; a page titled Node Exporter means node_exporter is ready.
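The same check works from the command line:

curl -s http://localhost:9100/metrics | head   # should print Prometheus metric lines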

Then visit port 9090 on the Prometheus host.

Install Grafana

wget https://dl.grafana.com/enterprise/release/grafana-enterprise-9.4.7-1.x86_64.rpm

yum install grafana-enterprise-9.4.7-1.x86_64.rpm

Start Grafana:

systemctl daemon-reload

systemctl start grafana-server

systemctl enable grafana-server

Visit port 3000 on the host.

The default username and password are both admin.

Import the node-monitoring dashboard template, ID 8919.

Stress-test the web cluster

Stress testing with ab:

yum install httpd-tools -y

ab -c 100 -n 3000 http://192.168.1.30/
