k8s1.13.3部署电商微服务

k8s1.13.3部署电商微服务

java代码编译打包(打包成jar包或war包)格式命令:

#cd 源代码目录

# mvn clean package -D maven.test.skip=true [-P prod]

#解释:-P prod:编译构建时引用哪一个配置文件

# clean: 清除目录中上一次操作生成的结果包,重新打新的包

# package: 编译并生成新的jar/war包

# -D maven.test.skip=true:  跳过单元测试

机器规划: k8s-master:  192.168.171.128     k8s-node1: 192.168.171.129     k8s-node2: 192.168.171.130

1.搭建k8s平台

[root@master ~]# kubectl get node

NAME     STATUS   ROLES    AGE   VERSION

master   Ready    master   77m   v1.13.3

node1    Ready    <none>   64m   v1.13.3

node2    Ready    <none>   44m   v1.13.3

2.安装docker-distribution(registry)镜像仓库(注意:此处安装的是轻量的docker registry,并非Harbor)

[root@master ~]# yum -y install docker-distribution.x86_64

[root@master ~]# systemctl enable docker-distribution.service

[root@master ~]# systemctl restart docker-distribution.service

[root@master ~]# netstat -anput |grep 5000

tcp6       0      0 :::5000                 :::*                    LISTEN      59861/registry

3.安装jdk1.8

[root@master ~]# ls jdk-8u144-linux-x64.tar.gz

jdk-8u144-linux-x64.tar.gz

[root@master ~]# tar -zxf jdk-8u144-linux-x64.tar.gz

[root@master ~]# mv jdk1.8.0_144/ /usr/local/

[root@master ~]# ls /usr/local/jdk1.8.0_144/

bin  COPYRIGHT  db  include  javafx-src.zip  jre  lib  LICENSE  man  README.html  release  src.zip  THIRDPARTYLICENSEREADME-JAVAFX.txt  THIRDPARTYLICENSEREADME.txt

[root@master ~]# vim /etc/profile

export JAVA_HOME=/usr/local/jdk1.8.0_144

export PATH=$JAVA_HOME/bin:$PATH

export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

[root@master ~]# source  /etc/profile

[root@master ~]# java -version

java version "1.8.0_144"

Java(TM) SE Runtime Environment (build 1.8.0_144-b01)

Java HotSpot(TM) 64-Bit Server VM (build 25.144-b01, mixed mode)

4.安装mvn

[root@master ~]# ls apache-maven-3.5.3-bin.tar.gz

apache-maven-3.5.3-bin.tar.gz

[root@master ~]# tar -zxf apache-maven-3.5.3-bin.tar.gz

[root@master ~]# mv apache-maven-3.5.3 /usr/local/

[root@master ~]# ls /usr/local/apache-maven-3.5.3/

bin  boot  conf  lib  LICENSE  NOTICE  README.txt

[root@master ~]# vim /etc/profile

export JAVA_HOME=/usr/local/jdk1.8.0_144

export PATH=$JAVA_HOME/bin:$PATH

export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export MVN_HOME=/usr/local/apache-maven-3.5.3

export PATH=$PATH:$MVN_HOME/bin

[root@master ~]# source  /etc/profile

[root@master ~]# mvn -v

Apache Maven 3.5.3 (3383c37e1f9e9b3bc3df5050c29c8aff9f295297; 2018-02-25T03:49:05+08:00)

Maven home: /usr/local/apache-maven-3.5.3

Java version: 1.8.0_144, vendor: Oracle Corporation

Java home: /usr/local/jdk1.8.0_144/jre

Default locale: en_US, platform encoding: UTF-8

OS name: "linux", version: "3.10.0-514.el7.x86_64", arch: "amd64", family: "unix"

5.使用代码包部署注册中心服务-eureka-service到k8s集群:

1)准备代码和修改配置文件:

[root@master ~]# ls simple-microservice_all.tar.gz

simple-microservice_all.tar.gz

[root@master ~]# tar -zxf simple-microservice_all.tar.gz

[root@master ~]# cd simple-microservice_all

[root@master simple-microservice_all]# ls

simple-microservice-dev1  simple-microservice-dev2  simple-microservice-dev3  simple-microservice-dev4  simple-microservice-master

[root@master simple-microservice_all]# cd simple-microservice-dev3/

[root@master simple-microservice-dev3]# ls

basic-common  db  eureka-service  gateway-service  k8s  LICENSE  lombok.config  order-service  pom.xml  portal-service  product-service  README.md  stock-service

[root@master simple-microservice-dev3]# ls eureka-service/

Dockerfile  pom.xml  src

[root@master simple-microservice-dev3]# ls eureka-service/src/main/resources/   #两个配置文件和一个控制引用哪个配置文件

application-dev.yml  application-fat.yml  application.yml        #前面两个分别是开发、生产配置文件,后面是控制引用哪个配置文件

[root@master simple-microservice-dev3]# cat eureka-service/src/main/resources/application.yml

server:

  port: 8888         #定义eureka服务的端口

spring:

  application:

    name: eureka-server

  profiles:

    active: fat              #表示引用生产application-fat配置文件

[root@master simple-microservice-dev3]# cat eureka-service/src/main/resources/application-fat.yml

eureka:

  server:

    renewal-percent-threshold: 0.9

    enable-self-preservation: false

    eviction-interval-timer-in-ms: 40000

  instance:

    hostname: 127.0.0.1

    prefer-ip-address: false       #不是以ip的方式进行集群的相互联系注册,而是用下面dns解析的固定格式的名字(eureka-0/1/2)进行相互联系注册

  client:

    register-with-eureka: true

    serviceUrl:

      defaultZone: http://eureka-0.eureka.ms:${server.port}/eureka/,http://eureka-1.eureka.ms:${server.port}/eureka/,http://eureka-2.eureka.ms:${server.port}/eureka/

    fetch-registry: true

#${server.port}是引用的上面application.yml文件中的8888端口。

#注意: 配置连接方式的格式(Eureka集群节点Pod名称格式):  statefulset名字-索引号.service-name.namespace-name.域名或ip:端口

statefulset名字-索引号相当于起来的pod的名字,即: eureka-0/1/2  

2)根据准备和修改好的代码配置文件编译打包—打成注册中心jar包:

[root@master simple-microservice-dev3]# mvn clean package -D maven.test.skip=true   #编译打包成jar包,所有项目都打,但目前只用eureka

[root@master simple-microservice-dev3]# ls eureka-service/

Dockerfile  pom.xml  src  target

[root@master simple-microservice-dev3]# ls eureka-service/target/

classes  eureka-service.jar  eureka-service.jar.original  generated-sources  maven-archiver  maven-status

3)使用打成的jar包构建成注册中心的镜像并推送到镜像仓库:

[root@master simple-microservice-dev3]# cat eureka-service/Dockerfile     #Dockerfile文件用于构建镜像(注意:下面 FROM/RUN 行后的 # 注释仅为讲解标注,真实 Dockerfile 中不能这样写,尤其是续行符 \ 之后不能跟注释)

FROM java:8-jdk-alpine                 #基础镜像

LABEL maintainer lizhenliang/www.ctnrs.com

RUN  apk add -U tzdata && \            #该基础镜像安装什么用apk命令

     ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime

COPY ./target/eureka-service.jar ./

EXPOSE 8888

CMD java -jar -Deureka.instance.hostname=${MY_POD_NAME}.eureka.ms /eureka-service.jar

[root@master simple-microservice-dev3]# cd eureka-service/

[root@master eureka-service]# ls

Dockerfile  pom.xml  src  target

[root@master eureka-service]# ls target/eureka-service.jar

eureka-service.jar

[root@master eureka-service]# docker build -t 192.168.171.128:5000/eureka:v2 .

[root@master eureka-service]# docker images |grep eureka

192.168.171.128:5000/eureka                                                      v2                  7d578ba37350        4 seconds ago       194MB

[root@master eureka-service]# vim /etc/docker/daemon.json    #所有节点都需要配置

{

"insecure-registries":["192.168.171.128:5000"]

}

[root@master eureka-service]# systemctl restart docker

[root@master eureka-service]# docker push 192.168.171.128:5000/eureka:v2  

[root@master eureka-service]# curl http://192.168.171.128:5000/v2/_catalog

{"repositories":["eureka"]}

[root@master eureka-service]# curl http://192.168.171.128:5000/v2/eureka/tags/list

{"name":"eureka","tags":["v2"]}

[root@master eureka-service]# cd

4)搭建Ingress-nginx-controller:—ingress负载均衡器的pod(nginx的pod端口是80,映射到宿主机的端口也是80—使用物理网络,跟pod对应宿主机一个网络)

[root@master ~]# mkdir ingress

[root@master ~]# cd ingress/

[root@master ingress]# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/mandatory.yaml

[root@master ingress]# ls

mandatory.yaml

上传提前下载好的ingress镜像

[root@master ingress]# rz

[root@master ingress]# ls

mandatory.yaml  nginx-ingress-controller.tar

[root@master ingress]# docker load -i /root/ingress/nginx-ingress-controller.tar

[root@master ingress]# docker images |grep ingress

quay.io/kubernetes-ingress-controller/nginx-ingress-controller                   0.24.1              98675eb54d0e        5 months ago        631MB

[root@master ingress]# docker tag quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.24.1 192.168.171.128:5000/quay.io/nginx-ingress-controller:0.24.1

[root@master ingress]# docker images |grep ingress

192.168.171.128:5000/quay.io/nginx-ingress-controller                            0.24.1              98675eb54d0e        5 months ago        631MB

quay.io/kubernetes-ingress-controller/nginx-ingress-controller                   0.24.1              98675eb54d0e        5 months ago        631MB

[root@master ingress]# docker push 192.168.171.128:5000/quay.io/nginx-ingress-controller:0.24.1

[root@master ingress]# curl http://192.168.171.128:5000/v2/_catalog

{"repositories":["eureka","quay.io/nginx-ingress-controller"]}

[root@master ingress]# curl http://192.168.171.128:5000/v2/quay.io/nginx-ingress-controller/tags/list

{"name":"quay.io/nginx-ingress-controller","tags":["0.24.1"]}

[root@master ingress]# vim mandatory.yaml

apiVersion: v1

kind: Namespace

metadata:

  name: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap

apiVersion: v1

metadata:

  name: nginx-configuration

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap

apiVersion: v1

metadata:

  name: tcp-services

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap

apiVersion: v1

metadata:

  name: udp-services

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

---

apiVersion: v1

kind: ServiceAccount

metadata:

  name: nginx-ingress-serviceaccount

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

---

apiVersion: rbac.authorization.k8s.io/v1beta1

kind: ClusterRole

metadata:

  name: nginx-ingress-clusterrole

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

rules:

  - apiGroups:

      - ""

    resources:

      - configmaps

      - endpoints

      - nodes

      - pods

      - secrets

    verbs:

      - list

      - watch

  - apiGroups:

      - ""

    resources:

      - nodes

    verbs:

      - get

  - apiGroups:

      - ""

    resources:

      - services

    verbs:

      - get

      - list

      - watch

  - apiGroups:

      - ""

    resources:

      - events

    verbs:

      - create

      - patch

  - apiGroups:

      - "extensions"

      - "networking.k8s.io"

    resources:

      - ingresses

    verbs:

      - get

      - list

      - watch

  - apiGroups:

      - "extensions"

      - "networking.k8s.io"

    resources:

      - ingresses/status

    verbs:

      - update

---

apiVersion: rbac.authorization.k8s.io/v1beta1

kind: Role

metadata:

  name: nginx-ingress-role

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

rules:

  - apiGroups:

      - ""

    resources:

      - configmaps

      - pods

      - secrets

      - namespaces

    verbs:

      - get

  - apiGroups:

      - ""

    resources:

      - configmaps

    resourceNames:

      # Defaults to "<election-id>-<ingress-class>"

      # Here: "<ingress-controller-leader>-<nginx>"

      # This has to be adapted if you change either parameter

      # when launching the nginx-ingress-controller.

      - "ingress-controller-leader-nginx"

    verbs:

      - get

      - update

  - apiGroups:

      - ""

    resources:

      - configmaps

    verbs:

      - create

  - apiGroups:

      - ""

    resources:

      - endpoints

    verbs:

      - get

---

apiVersion: rbac.authorization.k8s.io/v1beta1

kind: RoleBinding

metadata:

  name: nginx-ingress-role-nisa-binding

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: Role

  name: nginx-ingress-role

subjects:

  - kind: ServiceAccount

    name: nginx-ingress-serviceaccount

    namespace: ingress-nginx

---

apiVersion: rbac.authorization.k8s.io/v1beta1

kind: ClusterRoleBinding

metadata:

  name: nginx-ingress-clusterrole-nisa-binding

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: nginx-ingress-clusterrole

subjects:

  - kind: ServiceAccount

    name: nginx-ingress-serviceaccount

    namespace: ingress-nginx

---

apiVersion: apps/v1

kind: Deployment

metadata:

  name: nginx-ingress-controller

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

spec:

  replicas: 2                #副本为2个

  selector:

    matchLabels:

      app.kubernetes.io/name: ingress-nginx

      app.kubernetes.io/part-of: ingress-nginx

  template:

    metadata:

      labels:

        app.kubernetes.io/name: ingress-nginx

        app.kubernetes.io/part-of: ingress-nginx

      annotations:

        prometheus.io/port: "10254"

        prometheus.io/scrape: "true"

    spec:

      hostNetwork: true        #添加,使用物理网络,跟pod对应宿主机一个网络

      serviceAccountName: nginx-ingress-serviceaccount

      containers:

        - name: nginx-ingress-controller

          #image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.1

          image: 192.168.171.128:5000/quay.io/nginx-ingress-controller:0.24.1   #修改镜像

          imagePullPolicy: IfNotPresent    #添加,防止下载不了,老是always从镜像仓库下载,添加后可手工先下载到本地

          args:

            - /nginx-ingress-controller

            - --configmap=$(POD_NAMESPACE)/nginx-configuration

            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services

            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services

            - --publish-service=$(POD_NAMESPACE)/ingress-nginx

            - --annotations-prefix=nginx.ingress.kubernetes.io

          securityContext:

            allowPrivilegeEscalation: true

            capabilities:

              drop:

                - ALL

              add:

                - NET_BIND_SERVICE

            # www-data -> 33

            runAsUser: 33

          env:

            - name: POD_NAME

              valueFrom:

                fieldRef:

                  fieldPath: metadata.name

            - name: POD_NAMESPACE

              valueFrom:

                fieldRef:

                  fieldPath: metadata.namespace

          ports:

            - name: http

              containerPort: 80    #该ingress的容器内端口

              hostPort: 80      #添加,指定调度到某个物理机后,在物理机上启动一个端口,来映射该ingress容器中端口,域名访问时候对应的端口

            - name: https

              containerPort: 443

              hostPort: 443     #添加,指定物理机端口,来映射容器中端口

          livenessProbe:

            failureThreshold: 3

            httpGet:

              path: /healthz

              port: 10254

              scheme: HTTP

            initialDelaySeconds: 10

            periodSeconds: 10

            successThreshold: 1

            timeoutSeconds: 10

          readinessProbe:

            failureThreshold: 3

            httpGet:

              path: /healthz

              port: 10254

              scheme: HTTP

            periodSeconds: 10

            successThreshold: 1

            timeoutSeconds: 10

---

[root@master ingress]# kubectl apply -f mandatory.yaml

[root@master ingress]# kubectl get pod -o wide -n ingress-nginx

NAME                                      READY   STATUS    RESTARTS   AGE    IP                NODE    NOMINATED NODE   READINESS GATES

nginx-ingress-controller-d68b4b97-jfxx9   1/1     Running   0          114s   192.168.171.129   node1   <none>           <none>

nginx-ingress-controller-d68b4b97-pxb6q   1/1     Running   0          114s   192.168.171.130   node2   <none>           <none>

[root@master ingress]# cd

5)使用k8s平台部署eureka注册中心服务:

[root@master ~]# cd simple-microservice_all/simple-microservice-dev3/

[root@master simple-microservice-dev3]# ls

basic-common  db  eureka-service  gateway-service  k8s  LICENSE  lombok.config  order-service  pom.xml  portal-service  product-service  README.md  stock-service

[root@master simple-microservice-dev3]# cd k8s/

[root@master k8s]# ls

docker_build.sh  eureka.yaml  gateway.yaml  order.yaml  portal.yaml  product.yaml  stock.yaml

[root@master k8s]# cat eureka.yaml

---

apiVersion: extensions/v1beta1

kind: Ingress

metadata:

  name: eureka

  namespace: ms

spec:

  rules:

    - host: eureka.ctnrs.com     #关联前端域名,需要能和相应node的ip解析(客户端访问前端域名,前端域名端口就是ingress-controller的端口:80

      http:

        paths:

        - path: /

          backend:

            serviceName: eureka    #关联后的service的name

            servicePort: 8888      #关联后端service的port

---

apiVersion: v1

kind: Service

metadata:

  name: eureka

  namespace: ms

spec:

  clusterIP: None

  #type: NodePort    #若要通过域名和nodeport端口访问可以释放该项,且注释上面:clusterIP: None

  ports:

  - port: 8888

    #nodePort: 30002      #若要通过域名和nodeport端口访问可以释放该项

    name: eureka

  selector:

    project: ms

    app: eureka

---

apiVersion: apps/v1

kind: StatefulSet

metadata:

  name: eureka

  namespace: ms

spec:

  replicas: 3

  selector:

    matchLabels:

      project: ms

      app: eureka

  serviceName: "eureka"

  template:

    metadata:

      labels:

        project: ms

        app: eureka

    spec:

      #imagePullSecrets:              #通过认证到镜像仓库才能下载

      #- name: registry-pull-secret

      containers:

      - name: eureka

        #image: 192.168.171.128:5000/microservice/eureka:2019-09-26-17-36-44

        image: 192.168.171.128:5000/eureka:v2

        ports:

          - protocol: TCP

            containerPort: 8888

        env:

          - name: MY_POD_NAME

            valueFrom:

              fieldRef:

                fieldPath: metadata.name

        resources:

          requests:

            cpu: 0.5

            memory: 256Mi

          limits:

            cpu: 1

            memory: 1Gi

        readinessProbe:

          tcpSocket:

            port: 8888

          initialDelaySeconds: 60

          periodSeconds: 10

        livenessProbe:

          tcpSocket:

            port: 8888

          initialDelaySeconds: 60

          periodSeconds: 10

[root@master k8s]# kubectl create namespace ms

[root@master k8s]# kubectl get namespace

NAME            STATUS   AGE

default         Active   124m

ingress-nginx   Active   7m22s

kube-public     Active   124m

kube-system     Active   124m

ms              Active   15s

[root@master k8s]# kubectl apply -f eureka.yaml

[root@master k8s]# kubectl get pod,svc,ing -n ms -o wide

NAME           READY   STATUS    RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES

pod/eureka-0   1/1     Running   0          3m20s   10.244.1.19   node1   <none>           <none>

pod/eureka-1   1/1     Running   0          2m18s   10.244.2.24   node2   <none>           <none>

pod/eureka-2   1/1     Running   0          75s     10.244.1.20   node1   <none>           <none>

NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE     SELECTOR

service/eureka   ClusterIP   None         <none>        8888/TCP   3m20s   app=eureka,project=ms

NAME                        HOSTS              ADDRESS   PORTS   AGE

ingress.extensions/eureka   eureka.ctnrs.com             80      3m20s

6)Windows配置hosts解析(注意:hosts文件中同一域名的多条记录只有第一条生效,解析到任意一台运行ingress-controller的node即可,二选一):

192.168.171.129 eureka.ctnrs.com

192.168.171.130 eureka.ctnrs.com

浏览器访问:http://eureka.ctnrs.com

 

6.在master机器(也可其他机器)安装mysql服务

[root@master ~]# yum -y install mariadb mariadb-server

[root@master ~]# systemctl start mariadb

[root@master ~]# systemctl enable mariadb

[root@localhost ~]# mysql   安装启动mariadb后,不需密码即可登录

MariaDB [(none)]> show databases;

+--------------------+

| Database           |

+--------------------+

| information_schema |

| mysql              |

| performance_schema |

| test               |

+--------------------+

MariaDB [(none)]> quit

接下来进行MariaDB的相关简单配置,使用命令: mysql_secure_installation

[root@localhost ~]# mysql_secure_installation   #简单安全安装配置,回车

NOTE: RUNNING ALL PARTS OF THIS SCRIPT IS RECOMMENDED FOR ALL MariaDB

      SERVERS IN PRODUCTION USE!  PLEASE READ EACH STEP CAREFULLY!

In order to log into MariaDB to secure it, we'll need the current

password for the root user.  If you've just installed MariaDB, and

you haven't set the root password yet, the password will be blank,

so you should just press enter here.

Enter current password for root (enter for none):    回车,输入当前密码,无,直接回车即可

OK, successfully used password, moving on...

Setting the root password ensures that nobody can log into the MariaDB

root user without the proper authorisation.  

Set root password? [Y/n] y

New password:             输入要设置的root密码: 123

Re-enter new password:    确认root的密码: 123

Password updated successfully!

Reloading privilege tables..

 ... Success!

By default, a MariaDB installation has an anonymous user, allowing anyone

to log into MariaDB without having to have a user account created for

them.  This is intended only for testing, and to make the installation

go a bit smoother.  You should remove them before moving into a

production environment.   #下面一路都是y

Remove anonymous users? [Y/n] y  是否删除匿名用户,回车

 ... Success!

Normally, root should only be allowed to connect from 'localhost'.  This

ensures that someone cannot guess at the root password from the network.

Disallow root login remotely? [Y/n] y  是否禁止root远程登录,回车

 ... Success!

By default, MariaDB comes with a database named 'test' that anyone can

access.  This is also intended only for testing, and should be removed

before moving into a production environment.

Remove test database and access to it? [Y/n] y  是否删除test数据库,回车

 - Dropping test database...

 ... Success!

 - Removing privileges on test database...

 ... Success!

Reloading the privilege tables will ensure that all changes made so far

will take effect immediately.

Reload privilege tables now? [Y/n] y  是否重新加载权限表,回车

 ... Success!

Cleaning up...

All done!  If you've completed all of the above steps, your MariaDB

installation should now be secure.

Thanks for using MariaDB!

[root@localhost ~]# mysql -uroot -p123  #使用设置的密码登录mysql

MariaDB [(none)]> show databases;

+--------------------+

| Database           |

+--------------------+

| information_schema |

| mysql              |

| performance_schema |

+--------------------+

MariaDB [(none)]> quit

[root@master ~]# ls /root/simple-microservice_all/simple-microservice-dev3/db/

order.sql  product.sql  stock.sql

[root@master ~]# mysql -uroot -p123

MariaDB [(none)]> create database tb_order;

MariaDB [(none)]> create database tb_product;

MariaDB [(none)]> create database tb_stock;

MariaDB [(none)]> use tb_order;

MariaDB [tb_order]> source /root/simple-microservice_all/simple-microservice-dev3/db/order.sql;

MariaDB [tb_order]> use tb_product;

MariaDB [tb_product]> source /root/simple-microservice_all/simple-microservice-dev3/db/product.sql;

MariaDB [tb_product]> use tb_stock;

MariaDB [tb_stock]> source /root/simple-microservice_all/simple-microservice-dev3/db/stock.sql;

MariaDB [tb_stock]> show databases;

+--------------------+

| Database           |

+--------------------+

| information_schema |

| mysql              |

| performance_schema |

| tb_order           |

| tb_product         |

| tb_stock           |

+--------------------+

MariaDB [(none)]> grant all on *.* to root@'%' identified by '123';    #再授权一个能远程连接mysql的root密码

MariaDB [(none)]> flush privileges;

MariaDB [(none)]> quit

7.使用k8s平台部署gateway网关服务

[root@master ~]# cd simple-microservice_all/simple-microservice-dev3/

[root@master simple-microservice-dev3]# ls

basic-common  db  eureka-service  gateway-service  k8s  LICENSE  lombok.config  order-service  pom.xml  portal-service  product-service  README.md  stock-service

[root@master simple-microservice-dev3]# ls gateway-service/

Dockerfile  pom.xml  src  target   #注意此处的target文件夹是因为编译eureka注册中心服务时全部服务都编译了,还需要针对不同服务修改配置后重新编译

1)确认配置文件配置:

[root@master simple-microservice-dev3]# ls gateway-service/src/main/resources/

application-dev.yml  application-fat.yml  application.yml

[root@master simple-microservice-dev3]# cat gateway-service/src/main/resources/application.yml

server:

  port: 9999

spring:

  profiles:

    active: fat     #启用的配置文件是fat.yml

  application:

    name: @artifactId@

[root@master simple-microservice-dev3]# cat gateway-service/src/main/resources/application-fat.yml 

spring:

  cloud:

    gateway:

      discovery:

        locator:

          #开启以服务id去注册中心上获取转发地址

          enabled: true

          ##小写serviceId

          lower-case-service-id: true

      routes:

        - id: product-service

          uri: lb://product-service

          filters:

            - StripPrefix=1

          predicates:

            - Path=/product/**

        - id: order-service

          uri: lb://order-service

          filters:

            - StripPrefix=1

          predicates:

            - Path=/order/**

        - id: stock-service

          uri: lb://stock-service

          filters:

            - StripPrefix=1

          predicates:

            - Path=/stock/**

eureka:

  instance:

    prefer-ip-address: true   #以ip形式注册到注册中心中

  client:

    register-with-eureka: true

    fetch-registry: true

    service-url:

      defaultZone: http://eureka-0.eureka.ms:8888/eureka,http://eureka-1.eureka.ms:8888/eureka,http://eureka-2.eureka.ms:8888/eureka

#上面是连接注册中心的配置

2)根据准备和修改好的代码配置文件编译打包—打成gateway网关服务jar包:

[root@master simple-microservice-dev3]# mvn clean package -D maven.test.skip=true   #编译打包成jar包,所有项目都打,但目前只用gateway

[root@master simple-microservice-dev3]# ls gateway-service/

Dockerfile  pom.xml  src  target

[root@master simple-microservice-dev3]# ls gateway-service/target/

classes  gateway-service.jar  gateway-service.jar.original  generated-sources  maven-archiver  maven-status

3)使用打成的jar包构建成gateway网关服务的镜像并推送到镜像仓库:

[root@master simple-microservice-dev3]# cat gateway-service/Dockerfile   #Dockerfile文件用于构建镜像并推送到仓库

FROM java:8-jdk-alpine           #基础镜像

LABEL maintainer lizhenliang/www.ctnrs.com

RUN  apk add -U tzdata && \       #该基础镜像安装什么用apk命令

     ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime

COPY ./target/gateway-service.jar ./

EXPOSE 9999

CMD java -jar /gateway-service.jar

[root@master simple-microservice-dev3]# cd gateway-service/

[root@master gateway-service]# ls

Dockerfile  pom.xml  src  target

[root@master gateway-service]# ls target/gateway-service.jar

target/gateway-service.jar

[root@master gateway-service]# docker build -t 192.168.171.128:5000/gateway:v2 .

[root@master gateway-service]# docker images |grep gateway

192.168.171.128:5000/gateway                                                     v2                  829183077210        37 seconds ago      191MB

[root@master gateway-service]# vim /etc/docker/daemon.json    #所有节点都需要配置

{

"insecure-registries":["192.168.171.128:5000"]

}

[root@master gateway-service]# systemctl restart docker

[root@master gateway-service]# docker push 192.168.171.128:5000/gateway:v2  

[root@master gateway-service]# curl http://192.168.171.128:5000/v2/_catalog

{"repositories":["eureka","gateway","quay.io/nginx-ingress-controller"]}

[root@master gateway-service]# curl http://192.168.171.128:5000/v2/gateway/tags/list

{"name":"gateway","tags":["v2"]}

4)k8s部署gateway服务

[root@master gateway-service]# cd ../k8s/

[root@master k8s]# ls

docker_build.sh  eureka.yaml.bak  order.yaml   product.yaml

eureka.yaml      gateway.yaml     portal.yaml  stock.yaml

[root@master k8s]# vim gateway.yaml     #前提是安装ingress-controller,前面已经安装

---

apiVersion: extensions/v1beta1

kind: Ingress

metadata:

  name: gateway

  namespace: ms

spec:

  rules:

    - host: gateway.ctnrs.com        #关联前端域名,需要能和相应node的ip解析(客户端访问前端域名,前端域名端口就是ingress-controller的端口:80

      http:

        paths:

        - path: /

          backend:

            serviceName: gateway  #关联后的service的name

            servicePort: 9999     #关联后的service的port

---

apiVersion: v1

kind: Service

metadata:

  name: gateway

  namespace: ms

spec:

  ports:

  - port: 9999

    name: gateway

  selector:

    project: ms

    app: gateway

---

apiVersion: apps/v1

kind: Deployment 

metadata:

  name: gateway

  namespace: ms

spec:

  replicas: 2

  selector:

    matchLabels:

      project: ms

      app: gateway

  template:

    metadata:

      labels:

        project: ms

        app: gateway

    spec:

      imagePullSecrets:

      - name: registry-pull-secret

      containers:

      - name: gateway

        image: 192.168.171.128:5000/gateway:v2   #修改镜像

        imagePullPolicy: Always

        ports:

          - protocol: TCP

            containerPort: 9999

        resources:

          requests:

            cpu: 0.5

            memory: 256Mi

          limits:

            cpu: 1

            memory: 1Gi

        readinessProbe:

          tcpSocket:

            port: 9999

          initialDelaySeconds: 60

          periodSeconds: 10

        livenessProbe:

          tcpSocket:

            port: 9999

          initialDelaySeconds: 60

          periodSeconds: 10

[root@master k8s]# kubectl create namespace ms   #前面已操作过

[root@master k8s]# kubectl apply -f gateway.yaml

[root@master k8s]# kubectl get pod,svc,ing -n ms -o wide

NAME                           READY   STATUS    RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES

pod/eureka-0                   1/1     Running   0          5m25s   10.244.1.19   node1   <none>           <none>

pod/eureka-1                   1/1     Running   0          4m23s   10.244.2.24   node2   <none>           <none>

pod/eureka-2                   1/1     Running   0          3m20s   10.244.1.20   node1   <none>           <none>

pod/gateway-7d456fd888-c2nbw   1/1     Running   0          75s     10.244.1.21   node1   <none>           <none>

pod/gateway-7d456fd888-gzbn4   1/1     Running   0          75s     10.244.2.25   node2   <none>           <none>

NAME              TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)    AGE     SELECTOR

service/eureka    ClusterIP   None          <none>        8888/TCP   5m25s   app=eureka,project=ms

service/gateway   ClusterIP   10.1.106.93   <none>        9999/TCP   76s     app=gateway,project=ms

NAME                         HOSTS               ADDRESS   PORTS   AGE

ingress.extensions/eureka    eureka.ctnrs.com              80      5m25s

ingress.extensions/gateway   gateway.ctnrs.com             80      76s

到注册中心界面查看gateway网关服务会加入注册中心,如下:

8.使用k8s平台部署业务服务——商品(product)服务、库存(stock)服务、订单(order)服务

1)k8s部署商品product服务:

[root@master ~]# cd simple-microservice_all/simple-microservice-dev3/

[root@master simple-microservice-dev3]# ls

basic-common  db  eureka-service  gateway-service  k8s  LICENSE  lombok.config  order-service  pom.xml  portal-service  product-service  README.md  stock-service

[root@master simple-microservice-dev3]# ls product-service/

pom.xml  product-service-api  product-service-biz

[root@master simple-microservice-dev3]# ls product-service/product-service-biz/

Dockerfile  pom.xml  src  target    #注意此处的target文件夹是因为编译上面服务时全部服务都编译了,还需要针对不同服务修改配置后重新编译

a)确认配置文件配置:

[root@master simple-microservice-dev3]# ls product-service/product-service-biz/src/main/resources/

application-dev.yml  application-fat.yml  application.yml

[root@master simple-microservice-dev3]# cat product-service/product-service-biz/src/main/resources/application.yml

server:

  port: 8010

spring:

  profiles:

    active: fat  #启用的配置文件是fat.yml

  application:

    name: product-service

[root@master simple-microservice-dev3]# cat product-service/product-service-biz/src/main/resources/application-fat.yml

spring:

  datasource:

    url: jdbc:mysql://192.168.171.128:3306/tb_product?characterEncoding=utf-8    #确定连接的mysql地址

    username: root                 

    password: 123

    driver-class-name: com.mysql.jdbc.Driver

eureka:

  instance:

    prefer-ip-address: true

  client:

    register-with-eureka: true

    fetch-registry: true

    service-url:

      defaultZone: http://eureka-0.eureka.ms:8888/eureka,http://eureka-1.eureka.ms:8888/eureka,http://eureka-2.eureka.ms:8888/eureka

#上面是连接注册中心的配置

2)根据准备和修改好的代码配置文件编译打包—打成product服务jar包:

[root@master simple-microservice-dev3]# mvn clean package -D maven.test.skip=true   #编译打包成jar包,所有项目都打,本节只使用product

[root@master simple-microservice-dev3]# ls product-service/

pom.xml  product-service-api  product-service-biz

[root@master simple-microservice-dev3]# ls product-service/product-service-biz/

Dockerfile  pom.xml  src  target

[root@master simple-microservice-dev3]# ls product-service/product-service-biz/target/product-service-biz.jar

product-service/product-service-biz/target/product-service-biz.jar

3)使用打成的jar包构建product服务的镜像并推送到镜像仓库:

[root@master simple-microservice-dev3]# cd product-service/product-service-biz/

[root@master product-service-biz]# ls

Dockerfile  pom.xml  src  target

[root@master product-service-biz]# ls target/product-service-biz.jar

target/product-service-biz.jar

[root@master product-service-biz]# cat Dockerfile   #Dockerfile文件构建镜像并推送到仓库

FROM java:8-jdk-alpine                  #基础镜像

LABEL maintainer lizhenliang/www.ctnrs.com

RUN  apk add -U tzdata && \            #该基础镜像安装什么用apk命令

     ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime

COPY ./target/product-service-biz.jar ./

EXPOSE 8010

CMD java -jar /product-service-biz.jar

[root@master product-service-biz]# docker build -t 192.168.171.128:5000/product:v2 .

[root@master product-service-biz]# docker images |grep product

192.168.171.128:5000/product                                                     v2                    2d8069ba8e3f        17 seconds ago      193MB

[root@master product-service-biz]#vim /etc/docker/daemon.json    #所有节点都需要配置

{

"insecure-registries":["192.168.171.128:5000"]

}

[root@master product-service-biz]# systemctl restart docker

[root@master product-service-biz]# docker push 192.168.171.128:5000/product:v2  

[root@master product-service-biz]# curl http://192.168.171.128:5000/v2/_catalog

{"repositories":["eureka","gateway","product","quay.io/nginx-ingress-controller"]}

[root@master product-service-biz]# curl http://192.168.171.128:5000/v2/product/tags/list

{"name":"product","tags":["v2"]}

4)k8s部署product服务

[root@master product-service-biz]# cd ../../k8s/

[root@master k8s]# ls

docker_build.sh  eureka.yaml.bak  order.yaml   product.yaml

eureka.yaml      gateway.yaml     portal.yaml  stock.yaml

[root@master k8s]# vim product.yaml     #前提是安装ingress-controller,前面已经安装  

apiVersion: apps/v1

kind: Deployment

metadata:

  name: product

  namespace: ms

spec:

  replicas: 2

  selector:

    matchLabels:

      project: ms

      app: product

  template:

    metadata:

      labels:

        project: ms

        app: product

    spec:

      imagePullSecrets:

      - name: registry-pull-secret

      containers:

      - name: product

        image: 192.168.171.128:5000/product:v2   #修改镜像

        imagePullPolicy: Always

        ports:

          - protocol: TCP

            containerPort: 8010

        resources:

          requests:

            cpu: 0.5

            memory: 256Mi

          limits:

            cpu: 1

            memory: 1Gi

        readinessProbe:

          tcpSocket:

            port: 8010

          initialDelaySeconds: 60

          periodSeconds: 10

        livenessProbe:

          tcpSocket:

            port: 8010

          initialDelaySeconds: 60

          periodSeconds: 10

[root@master k8s]# kubectl create namespace ms   #前面已操作过

[root@master k8s]# kubectl apply -f product.yaml 

[root@master k8s]# kubectl get pod,svc,ing -n ms -o wide

……(输出略)

9.使用k8s平台部署业务服务——商品服务,存储服务,订单服务

1)k8s部署存储stock服务:

[root@master ~]# cd simple-microservice_all/simple-microservice-dev3/

[root@master simple-microservice-dev3]# ls

basic-common  db  eureka-service  gateway-service  k8s  LICENSE  lombok.config  order-service  pom.xml  portal-service  product-service  README.md  stock-service

[root@master simple-microservice-dev3]# ls stock-service/

pom.xml  stock-service-api  stock-service-biz

[root@master simple-microservice-dev3]# ls stock-service/stock-service-biz/

Dockerfile  pom.xml  src  target   #注意此处的target文件夹是因为编译上面服务时全部服务都编译了,还需要针对不同服务修改配置后重新编译

a)确认配置文件配置:

[root@master simple-microservice-dev3]# ls stock-service/stock-service-biz/src/main/resources/

application-dev.yml  application-fat.yml  application.yml

[root@master simple-microservice-dev3]# cat stock-service/stock-service-biz/src/main/resources/application.yml

server:

  port: 8030

spring:

  profiles:

    active: fat  #启用的配置文件是application-fat.yml

  application:

    name: stock-service

[root@master simple-microservice-dev3]# cat stock-service/stock-service-biz/src/main/resources/application-fat.yml

spring:

  datasource:

    url: jdbc:mysql://192.168.171.128:3306/tb_stock?characterEncoding=utf-8  #确定连接的mysql地址

    username: root

    password: 123

    driver-class-name: com.mysql.jdbc.Driver

eureka:

  instance:

    prefer-ip-address: true

  client:

    register-with-eureka: true

    fetch-registry: true

    service-url:

      defaultZone: http://eureka-0.eureka.ms:8888/eureka,http://eureka-1.eureka.ms:8888/eureka,http://eureka-2.eureka.ms:8888/eureka

#上面是连接注册中心的配置

2)根据准备和修改好的代码配置文件编译打包—打成stock服务jar包:

[root@master simple-microservice-dev3]# mvn clean package -D maven.test.skip=true   #编译打包成jar包,所有项目都打,本节只使用stock

[root@master simple-microservice-dev3]# ls stock-service/stock-service-biz/

Dockerfile  pom.xml  src  target

[root@master simple-microservice-dev3]# ls stock-service/stock-service-biz/target/

classes            maven-archiver  stock-service-biz.jar

generated-sources  maven-status    stock-service-biz.jar.original

3)使用打成的jar包构建stock服务的镜像并推送到镜像仓库:

[root@master simple-microservice-dev3]# cat stock-service/stock-service-biz/Dockerfile   #Dockerfile文件构建镜像并推送到仓库

FROM java:8-jdk-alpine                         #基础镜像

LABEL maintainer lizhenliang/www.ctnrs.com

RUN  apk add -U tzdata && \                    #该基础镜像安装什么用apk命令

     ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime

COPY ./target/stock-service-biz.jar ./

EXPOSE 8030

CMD java -jar /stock-service-biz.jar

[root@master simple-microservice-dev3]# cd stock-service/stock-service-biz/

[root@master stock-service-biz]# ls

Dockerfile  pom.xml  src  target

[root@master stock-service-biz]# ls target/stock-service-biz.jar

target/stock-service-biz.jar

[root@master stock-service-biz]# docker build -t 192.168.171.128:5000/stock:v2 .

[root@master stock-service-biz]# docker images |grep stock

192.168.171.128:5000/stock                                                       v2                    af01d49b2913        31 seconds ago      193MB

[root@master stock-service-biz]#  vim /etc/docker/daemon.json    #所有节点都需要配置

{

"insecure-registries":["192.168.171.128:5000"]

}

[root@master stock-service-biz]# systemctl restart docker

[root@master stock-service-biz]# docker push 192.168.171.128:5000/stock:v2  

[root@master stock-service-biz]# curl http://192.168.171.128:5000/v2/_catalog

{"repositories":["eureka","gateway","product","quay.io/nginx-ingress-controller","stock"]}

[root@master stock-service-biz]# curl http://192.168.171.128:5000/v2/stock/tags/list

{"name":"stock","tags":["v2"]}

4)k8s部署stock服务

[root@master stock-service-biz]# cd ../../k8s/

[root@master k8s]# ls

docker_build.sh  eureka.yaml  eureka.yaml.bak  gateway.yaml  order.yaml  portal.yaml  product.yaml  stock.yaml

[root@master k8s]# vim stock.yaml    #前提是安装ingress-controller,前面已经安装

apiVersion: apps/v1

kind: Deployment

metadata:

  name: stock

  namespace: ms

spec:

  replicas: 2

  selector:

    matchLabels:

      project: ms

      app: stock

  template:

    metadata:

      labels:

        project: ms

        app: stock

    spec:

      imagePullSecrets:

      - name: registry-pull-secret

      containers:

      - name: stock

        image: 192.168.171.128:5000/stock:v2      #修改镜像

        imagePullPolicy: Always

        ports:

          - protocol: TCP

            containerPort: 8030

        resources:

          requests:

            cpu: 0.5

            memory: 256Mi

          limits:

            cpu: 1

            memory: 1Gi

        readinessProbe:

          tcpSocket:

            port: 8030

          initialDelaySeconds: 60

          periodSeconds: 10

        livenessProbe:

          tcpSocket:

            port: 8030

          initialDelaySeconds: 60

          periodSeconds: 10

[root@master k8s]# kubectl create namespace ms   #前面已操作过

[root@master k8s]# kubectl apply -f stock.yaml

[root@master k8s]# kubectl get pod,svc,ing -n ms -o wide

……(输出略)

10.使用k8s平台部署业务服务——商品服务,存储服务,订单服务

1)k8s部署订单order服务:

[root@master ~]# cd simple-microservice_all/simple-microservice-dev3/

[root@master simple-microservice-dev3]# ls

basic-common  db  eureka-service  gateway-service  k8s  LICENSE  lombok.config  order-service  pom.xml  portal-service  product-service  README.md  stock-service

[root@master simple-microservice-dev3]# ls order-service/

order-service-api  order-service-biz  pom.xml

[root@master simple-microservice-dev3]# ls order-service/order-service-biz/

Dockerfile  pom.xml  src  target   #注意此处的target文件夹是因为编译上面服务时全部服务都编译了,还需要针对不同服务修改配置后重新编译

a)确认配置文件配置:

[root@master simple-microservice-dev3]# ls order-service/order-service-biz/src/main/resources/

application-dev.yml  application-fat.yml  application.yml

[root@master simple-microservice-dev3]# cat order-service/order-service-biz/src/main/resources/application.yml

server:

  port: 8020

spring:

  profiles:

    active: fat  #启用的配置文件是application-fat.yml

  application:

    name: order-service

[root@master simple-microservice-dev3]# cat order-service/order-service-biz/src/main/resources/application-fat.yml

spring:

  datasource:

    url: jdbc:mysql://192.168.171.128:3306/tb_order?characterEncoding=utf-8     #确定连接的mysql地址

    username: root

    password: 123

    driver-class-name: com.mysql.jdbc.Driver

eureka:

  instance:

    prefer-ip-address: true

  client:

    register-with-eureka: true

    fetch-registry: true

    service-url:

      defaultZone: http://eureka-0.eureka.ms:8888/eureka,http://eureka-1.eureka.ms:8888/eureka,http://eureka-2.eureka.ms:8888/eureka

#上面是连接注册中心的配置

2)根据准备和修改好的代码配置文件编译打包—打成order服务jar包:

[root@master simple-microservice-dev3]# mvn clean package -D maven.test.skip=true   #编译打包成jar包,所有项目都打,本节只使用order

[root@master simple-microservice-dev3]# ls order-service/order-service-biz/

Dockerfile  pom.xml  src  target

[root@master simple-microservice-dev3]# ls order-service/order-service-biz/target/

classes            maven-archiver  order-service-biz.jar

generated-sources  maven-status    order-service-biz.jar.original

3)使用打成的jar包构建order服务的镜像并推送到镜像仓库:

[root@master simple-microservice-dev3]# cat order-service/order-service-biz/Dockerfile  #Dockerfile文件构建镜像并推送到仓库

FROM java:8-jdk-alpine                #基础镜像

LABEL maintainer lizhenliang/www.ctnrs.com

RUN  apk add -U tzdata && \           #该基础镜像安装什么用apk命令

     ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime

COPY ./target/order-service-biz.jar ./

EXPOSE 8020

CMD java -jar /order-service-biz.jar

[root@master simple-microservice-dev3]# cd order-service/order-service-biz/

[root@master order-service-biz]# ls

Dockerfile  pom.xml  src  target

[root@master order-service-biz]# ls target/order-service-biz.jar

target/order-service-biz.jar

[root@master order-service-biz]# docker build -t 192.168.171.128:5000/order:v2 .

[root@master order-service-biz]# docker images |grep order

192.168.171.128:5000/order                                                       v2                    2c1a0b8eb6e3        About a minute ago   193MB

[root@master order-service-biz]# vim /etc/docker/daemon.json    #所有节点都需要配置

{

"insecure-registries":["192.168.171.128:5000"]

}

[root@master order-service-biz]# systemctl restart docker

[root@master order-service-biz]# docker push 192.168.171.128:5000/order:v2  

[root@master order-service-biz]# curl http://192.168.171.128:5000/v2/_catalog

{"repositories":["eureka","gateway","order","product","quay.io/nginx-ingress-controller","stock"]}

[root@master order-service-biz]# curl http://192.168.171.128:5000/v2/order/tags/list

{"name":"order","tags":["v2"]}

4)k8s部署order服务

[root@master order-service-biz]# cd ../../k8s/

[root@master k8s]# ls

docker_build.sh  eureka.yaml.bak  order.yaml   product.yaml

eureka.yaml      gateway.yaml     portal.yaml  stock.yaml

[root@master k8s]# vim order.yaml     #前提是安装ingress-controller,前面已经安装  

apiVersion: apps/v1

kind: Deployment

metadata:

  name: order

  namespace: ms

spec:

  replicas: 2

  selector:

    matchLabels:

      project: ms

      app: order

  template:

    metadata:

      labels:

        project: ms

        app: order

    spec:

      imagePullSecrets:

      - name: registry-pull-secret

      containers:

      - name: order

        image: 192.168.171.128:5000/order:v2    #修改镜像

        imagePullPolicy: Always

        ports:

          - protocol: TCP

            containerPort: 8020

        resources:

          requests:

            cpu: 0.5

            memory: 256Mi

          limits:

            cpu: 1

            memory: 1Gi

        readinessProbe:

          tcpSocket:

            port: 8020

          initialDelaySeconds: 60

          periodSeconds: 10

        livenessProbe:

          tcpSocket:

            port: 8020

          initialDelaySeconds: 60

          periodSeconds: 10

[root@master k8s]# kubectl create namespace ms   #前面已操作过

[root@master k8s]# kubectl apply -f order.yaml

[root@master k8s]# kubectl get pod,svc,ing -n ms -o wide

……(输出略)

11.使用k8s平台部署portal前端服务

1)k8s部署前端访问portal服务:

[root@master ~]# cd simple-microservice_all/simple-microservice-dev3/

[root@master simple-microservice-dev3]# ls

basic-common  db  eureka-service  gateway-service  k8s  LICENSE  lombok.config  order-service  pom.xml  portal-service  product-service  README.md  stock-service

[root@master simple-microservice-dev3]# ls portal-service/

Dockerfile  pom.xml  src  target   #注意此处的target文件夹是因为编译上面服务时全部服务都编译了,还需要针对不同服务修改配置后重新编译

a)确认配置文件配置:

[root@master simple-microservice-dev3]# ls portal-service/src/main/resources/

application-dev.yml  application-fat.yml  application.yml  static  templates

[root@master simple-microservice-dev3]# cat portal-service/src/main/resources/application.yml

server:

  port: 8080

  undertow:

    io-threads: 16

    worker-threads: 256

    buffer-size: 1024

    direct-buffers: true

spring:

  application:

    name: portal-service

  profiles:

    active: fat   #启用的配置文件是application-fat.yml

[root@master simple-microservice-dev3]# cat portal-service/src/main/resources/application-fat.yml

eureka:

  instance:

    prefer-ip-address: true

  client:

    service-url:

      defaultZone: http://eureka-0.eureka.ms:8888/eureka,http://eureka-1.eureka.ms:8888/eureka,http://eureka-2.eureka.ms:8888/eureka

    register-with-eureka: true

    fetch-registry: true

spring:

  freemarker:

    allow-request-override: false

    allow-session-override: false

    cache: true

    charset: UTF-8

    check-template-location: true

    content-type: text/html

    enabled: true

    expose-request-attributes: false

    expose-session-attributes: false

    expose-spring-macro-helpers: true

    prefer-file-system-access: true

    suffix: .ftl

    template-loader-path: classpath:/templates/

2)根据准备和修改好的代码配置文件编译打包—打成portal服务jar包:

[root@master simple-microservice-dev3]# mvn clean package -D maven.test.skip=true   #编译打包成jar包,所有项目都打,本节只使用portal

[root@master simple-microservice-dev3]# ls portal-service/

Dockerfile  pom.xml  src  target

[root@master simple-microservice-dev3]# ls portal-service/target/

classes  generated-sources  maven-archiver  maven-status  portal-service.jar  portal-service.jar.original

3)使用打成的jar包构建portal服务的镜像并推送到镜像仓库:

[root@master simple-microservice-dev3]# cd portal-service/

[root@master portal-service]# ls

Dockerfile  pom.xml  src  target

[root@master portal-service]# ls target/portal-service.jar

target/portal-service.jar

[root@master portal-service]# cat Dockerfile    #Dockerfile文件构建镜像并推送到仓库

FROM java:8-jdk-alpine

LABEL maintainer lizhenliang/www.ctnrs.com

RUN  apk add -U tzdata && \

     ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime

COPY ./target/portal-service.jar ./

EXPOSE 8080

CMD java -jar /portal-service.jar

[root@master portal-service]# docker build -t 192.168.171.128:5000/portal:v2 .

[root@master portal-service]# docker images |grep portal

192.168.171.128:5000/portal                                                      v2                    ad684a1fdacc        14 seconds ago      190MB

[root@master portal-service]# vim /etc/docker/daemon.json    #所有节点都需要配置

{

"insecure-registries":["192.168.171.128:5000"]

}

[root@master portal-service]# systemctl restart docker

[root@master portal-service]# docker push 192.168.171.128:5000/portal:v2  

[root@master portal-service]# curl http://192.168.171.128:5000/v2/_catalog

{"repositories":["eureka","gateway","order","portal","product","quay.io/nginx-ingress-controller","stock"]}

[root@master portal-service]# curl http://192.168.171.128:5000/v2/portal/tags/list

{"name":"portal","tags":["v2"]}

4)k8s部署portal服务

[root@master order-service-biz]# cd ../../k8s/

[root@master k8s]# ls

docker_build.sh  eureka.yaml.bak  order.yaml   product.yaml

eureka.yaml      gateway.yaml     portal.yaml  stock.yaml

[root@master k8s]# vim portal.yaml     #前提是安装ingress-controller,前面已经安装  

apiVersion: extensions/v1beta1

kind: Ingress

metadata:

  name: portal

  namespace: ms

spec:

  rules:

    - host: portal.ctnrs.com      #关联前端域名

      http:

        paths:

        - path: /

          backend:

            serviceName: portal   #关联后端service的name

            servicePort: 8080     #关联后端service的port

---

apiVersion: v1

kind: Service

metadata:

  name: portal

  namespace: ms

spec:

  ports:

  - port: 8080

    name: portal

  selector:

    project: ms

    app: portal

---

apiVersion: apps/v1

kind: Deployment

metadata:

  name: portal

  namespace: ms

spec:

  replicas: 2

  selector:

    matchLabels:

      project: ms

      app: portal

  template:

    metadata:

      labels:

        project: ms

        app: portal

    spec:

      imagePullSecrets:

      - name: registry-pull-secret

      containers:

      - name: portal

        image: 192.168.171.128:5000/portal:v2   #修改镜像

        imagePullPolicy: Always

        ports:

          - protocol: TCP

            containerPort: 8080

        resources:

          requests:

            cpu: 0.5

            memory: 256Mi

          limits:

            cpu: 1

            memory: 1Gi

        readinessProbe:

          tcpSocket:

            port: 8080

          initialDelaySeconds: 60

          periodSeconds: 10

        livenessProbe:

          tcpSocket:

            port: 8080

          initialDelaySeconds: 60

          periodSeconds: 10

[root@master k8s]# kubectl create namespace ms   #前面已操作过

[root@master k8s]# kubectl apply -f portal.yaml

[root@master k8s]# kubectl get pod,svc,ing -n ms -o wide

[root@master k8s]# kubectl get pod,svc,ing -n ms -o wide

……(输出略)

......

NAME              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE   SELECTOR

service/eureka    ClusterIP   None           <none>        8888/TCP   39m   app=eureka,project=ms

service/gateway   ClusterIP   10.1.22.112    <none>        9999/TCP   34m   app=gateway,project=ms

service/portal    ClusterIP   10.1.204.186   <none>        8080/TCP   31m   app=portal,project=ms

NAME                         HOSTS               ADDRESS   PORTS   AGE

ingress.extensions/eureka    eureka.ctnrs.com              80      39m

ingress.extensions/gateway   gateway.ctnrs.com             80      34m

ingress.extensions/portal    portal.ctnrs.com              80      31m

12.windows配置hosts解析:

192.168.171.129 eureka.ctnrs.com gateway.ctnrs.com portal.ctnrs.com

192.168.171.130 eureka.ctnrs.com gateway.ctnrs.com portal.ctnrs.com

13.浏览器访问网站: http://portal.ctnrs.com/

 点击查询商品服务,能从数据库里查出要购买的商品,如下:

 选择两件商品,购买,显示下单成功后,关闭页面,点击查询订单服务,如下:

点击查询订单服务,即显示已经购买的商品,如下: 

 所有服务都部署完成后,可以在注册中心页面查看已经注册进来,如下:(包括portal,此处内存资源不足,老是重启,所以没显示)

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

运维实战课程

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值