K8s 1.16 in production: a hands-on Tomcat deployment, with nginx-ingress as the domain proxy

Compile and package the Java code (into a jar or war). Command format:

# cd <source-code-directory>

# mvn clean package -D maven.test.skip=true [-P prod]

# -P prod: which build profile (configuration) to activate

# clean: remove the artifacts left by the previous build before packaging anew

# package: build the new package

# -D maven.test.skip=true: skip the unit tests

Note: when a traditional web service is deployed on k8s, the pod's site directory does not need persistent storage on the host or an NFS share, because the code package is baked into the image at build time; you simply redeploy from the image. You may, however, want to mount a ConfigMap to decouple the configuration files and make the web server's config easy to change.

Machine layout: k8s-master: 192.168.171.128     k8s-node1: 192.168.171.129     k8s-node2: 192.168.171.130

[root@k8s-master ~]# kubectl get nodes

NAME        STATUS   ROLES    AGE   VERSION

k8s-node1   Ready    <none>   18d   v1.16.0

k8s-node2   Ready    <none>   18d   v1.16.0

0. Install Docker 18.09.6 on the master from the binary tarball:

[root@k8s-master ~]# ls docker_banery_18.09.tar.gz

docker_banery_18.09.tar.gz

[root@k8s-master ~]# tar -zxf docker_banery_18.09.tar.gz

[root@k8s-master ~]# ls docker18.09/

daemon.json  docker-18.09.6.tgz  docker.service

[root@k8s-master ~]# cd docker18.09/

[root@k8s-master docker18.09]# ls

daemon.json  docker-18.09.6.tgz  docker.service

[root@k8s-master docker18.09]# tar -zxf docker-18.09.6.tgz

[root@k8s-master docker18.09]# ls docker    # the extracted docker binaries

containerd  containerd-shim  ctr  docker  dockerd  docker-init  docker-proxy  runc

[root@k8s-master docker18.09]# mv docker/* /usr/bin/   # move the docker binaries onto the PATH so the commands are easy to use

[root@k8s-master docker18.09]# cat docker.service       # the unit file that hands docker over to systemd

[Unit]

Description=Docker Application Container Engine

Documentation=https://docs.docker.com

After=network-online.target firewalld.service containerd.service

Wants=network-online.target

[Service]

Type=notify

ExecStart=/usr/bin/dockerd

ExecReload=/bin/kill -s HUP $MAINPID

TimeoutSec=0

RestartSec=2

Restart=always

StartLimitBurst=3

StartLimitInterval=60s

LimitNOFILE=infinity

LimitNPROC=infinity

LimitCORE=infinity

TasksMax=infinity

Delegate=yes

KillMode=process

[Install]

WantedBy=multi-user.target

[root@k8s-master docker18.09]# ls daemon.json

daemon.json

[root@k8s-master docker18.09]# mkdir /etc/docker

[root@k8s-master docker18.09]# mv daemon.json /etc/docker/

[root@k8s-master docker18.09]# vim /etc/docker/daemon.json   

{

    "registry-mirrors": ["http://bc437cce.m.daocloud.io"],

    "insecure-registries": ["192.168.171.128:5000"]

}  

[root@k8s-master docker18.09]# mv docker.service /usr/lib/systemd/system/

[root@k8s-master docker18.09]# systemctl daemon-reload

[root@k8s-master docker18.09]# systemctl start docker

[root@k8s-master docker18.09]# systemctl status docker

● docker.service - Docker Application Container Engine

   Loaded: loaded (/usr/lib/systemd/system/docker.service; disabled; vendor preset: disabled)

   Active: active (running) since Tue 2019-11-05 23:28:26 CST; 7s ago

[root@k8s-master docker18.09]# systemctl enable docker

[root@k8s-master docker18.09]# docker info

Containers: 0

 Running: 0

 Paused: 0

 Stopped: 0

Images: 0

Server Version: 18.09.6

Storage Driver: overlay2

[root@k8s-master docker18.09]# cd

1. Set up a private image registry (docker-distribution) directly on the host (a dedicated machine also works):

[root@k8s-master ~]# yum -y install docker-distribution.x86_64

[root@k8s-master ~]# systemctl enable docker-distribution.service

[root@k8s-master ~]# systemctl restart docker-distribution.service

[root@k8s-master ~]# cat /etc/docker/daemon.json  # let docker clients talk to the private registry without HTTPS certificate verification

{

    "registry-mirrors": ["http://bc437cce.m.daocloud.io"],

    "insecure-registries": ["192.168.171.128:5000"]     #docker-harbor机器的ip地址和端口:5000

}  

[root@k8s-master ~]# systemctl restart docker

[root@k8s-master ~]# docker pull centos

[root@k8s-master ~]# docker images |grep centos

centos              latest              0f3e07c0138f        7 weeks ago         220MB

[root@k8s-master ~]# docker tag centos:latest 192.168.171.128:5000/centos:v1

[root@k8s-master ~]# docker images |grep centos

192.168.171.128:5000/centos   v1                  0f3e07c0138f        7 weeks ago         220MB

centos                        latest              0f3e07c0138f        7 weeks ago         220MB

[root@k8s-master ~]# docker push 192.168.171.128:5000/centos:v1

[root@k8s-master ~]# curl http://192.168.171.128:5000/v2/_catalog

{"repositories":["centos"]}

[root@k8s-master ~]# curl http://192.168.171.128:5000/v2/centos/tags/list

{"name":"centos","tags":["v1"]}

2. Install JDK 1.8

[root@k8s-master ~]# ls jdk-8u144-linux-x64.tar.gz

jdk-8u144-linux-x64.tar.gz

[root@k8s-master ~]# tar -zxf jdk-8u144-linux-x64.tar.gz

[root@k8s-master ~]# mv jdk1.8.0_144/ /usr/local/

[root@k8s-master ~]# ls /usr/local/jdk1.8.0_144/

bin  COPYRIGHT  db  include  javafx-src.zip  jre  lib  LICENSE  man  README.html  release  src.zip  THIRDPARTYLICENSEREADME-JAVAFX.txt  THIRDPARTYLICENSEREADME.txt

[root@k8s-master ~]# vim /etc/profile

export JAVA_HOME=/usr/local/jdk1.8.0_144

export PATH=$JAVA_HOME/bin:$PATH

export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

[root@k8s-master ~]# source  /etc/profile

[root@k8s-master ~]# java -version

java version "1.8.0_144"

Java(TM) SE Runtime Environment (build 1.8.0_144-b01)

Java HotSpot(TM) 64-Bit Server VM (build 25.144-b01, mixed mode)

3. Install Maven

[root@k8s-master ~]# ls apache-maven-3.5.3-bin.tar.gz

apache-maven-3.5.3-bin.tar.gz

[root@k8s-master ~]# tar -zxf apache-maven-3.5.3-bin.tar.gz

[root@k8s-master ~]# mv apache-maven-3.5.3 /usr/local/

[root@k8s-master ~]# ls /usr/local/apache-maven-3.5.3/

bin  boot  conf  lib  LICENSE  NOTICE  README.txt

[root@k8s-master ~]# vim /etc/profile

export JAVA_HOME=/usr/local/jdk1.8.0_144

export PATH=$JAVA_HOME/bin:$PATH

export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export MVN_HOME=/usr/local/apache-maven-3.5.3

export PATH=$PATH:$MVN_HOME/bin

[root@k8s-master ~]# source  /etc/profile

[root@k8s-master ~]# mvn -v

Apache Maven 3.5.3 (3383c37e1f9e9b3bc3df5050c29c8aff9f295297; 2018-02-25T03:49:05+08:00)

Maven home: /usr/local/apache-maven-3.5.3

Java version: 1.8.0_144, vendor: Oracle Corporation

Java home: /usr/local/jdk1.8.0_144/jre

Default locale: en_US, platform encoding: UTF-8

OS name: "linux", version: "3.10.0-514.el7.x86_64", arch: "amd64", family: "unix"

4. Install MySQL (MariaDB) on the master node; a separate node works too

[root@k8s-master ~]# yum -y install mariadb mariadb-server

[root@k8s-master ~]# systemctl start mariadb

[root@k8s-master ~]# systemctl enable mariadb

[root@k8s-master ~]# mysql   # right after installing and starting mariadb, no password is needed to log in

MariaDB [(none)]> show databases;

+--------------------+

| Database           |

+--------------------+

| information_schema |

| mysql              |

| performance_schema |

| test               |

+--------------------+

MariaDB [(none)]> quit

Next, do the basic MariaDB security setup with mysql_secure_installation:

[root@k8s-master ~]# mysql_secure_installation   # walk through the prompts as annotated below

NOTE: RUNNING ALL PARTS OF THIS SCRIPT IS RECOMMENDED FOR ALL MariaDB

      SERVERS IN PRODUCTION USE!  PLEASE READ EACH STEP CAREFULLY!

In order to log into MariaDB to secure it, we'll need the current

password for the root user.  If you've just installed MariaDB, and

you haven't set the root password yet, the password will be blank,

so you should just press enter here.

Enter current password for root (enter for none):    # no password is set yet, just press Enter

OK, successfully used password, moving on...

Setting the root password ensures that nobody can log into the MariaDB

root user without the proper authorisation.  

Set root password? [Y/n] y

New password:             # enter the new root password: 123

Re-enter new password:    # confirm the root password: 123

Password updated successfully!

Reloading privilege tables..

 ... Success!

By default, a MariaDB installation has an anonymous user, allowing anyone

to log into MariaDB without having to have a user account created for

them.  This is intended only for testing, and to make the installation

go a bit smoother.  You should remove them before moving into a

production environment.   # answer y to everything below

Remove anonymous users? [Y/n] y

 ... Success!

Normally, root should only be allowed to connect from 'localhost'.  This

ensures that someone cannot guess at the root password from the network.

Disallow root login remotely? [Y/n] y

 ... Success!

By default, MariaDB comes with a database named 'test' that anyone can

access.  This is also intended only for testing, and should be removed

before moving into a production environment.

Remove test database and access to it? [Y/n] y

 - Dropping test database...

 ... Success!

 - Removing privileges on test database...

 ... Success!

Reloading the privilege tables will ensure that all changes made so far

will take effect immediately.

Reload privilege tables now? [Y/n] y

 ... Success!

Cleaning up...

All done!  If you've completed all of the above steps, your MariaDB

installation should now be secure.

Thanks for using MariaDB!

[root@k8s-master ~]# mysql -uroot -p123  # log in with the password just set

MariaDB [(none)]> show databases;

+--------------------+

| Database           |

+--------------------+

| information_schema |

| mysql              |

| performance_schema |

+--------------------+

MariaDB [(none)]> grant all on *.* to root@'%' identified by '123456';    # grant a root account that can connect remotely, password 123456 (this re-opens remote root access, which mysql_secure_installation just disabled)
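To confirm the remote grant works, a hedged check from one of the nodes (this assumes the mariadb client package is installed there):

[root@k8s-node1 ~]# mysql -uroot -p123456 -h 192.168.171.128 -e 'select user,host from mysql.user;'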

5. Clone the Java code, build it, point it at the database, build the project image, and push it to the registry:

[root@k8s-master ~]# mkdir tomcat-java

[root@k8s-master ~]# cd tomcat-java/

[root@k8s-master tomcat-java]# git clone https://github.com/lizhenliang/tomcat-java-demo

[root@k8s-master tomcat-java]# ls

nfs-client  nfs-client.tar.gz  tomcat-java-demo

[root@k8s-master tomcat-java]# ls tomcat-java-demo/

db  Dockerfile  LICENSE  pom.xml  README.md  src

[root@k8s-master tomcat-java]# ls tomcat-java-demo/db/

tables_ly_tomcat.sql

[root@k8s-master tomcat-java]# cat tomcat-java-demo/db/tables_ly_tomcat.sql

/*

MySQL - 5.6.30-log : Database - test

*/

CREATE DATABASE IF NOT EXISTS `test`  DEFAULT CHARACTER SET utf8 ;

USE `test`;

CREATE TABLE `user` (

  `id` INT(11) NOT NULL AUTO_INCREMENT,

  `name` VARCHAR(100) NOT NULL COMMENT '名字',

  `age` INT(3) NOT NULL COMMENT '年龄',

  `sex` CHAR(1) DEFAULT NULL COMMENT '性别',

  PRIMARY KEY (`id`)

) ENGINE=INNODB DEFAULT CHARSET=utf8;

Import the schema into MySQL:

[root@k8s-master tomcat-java]# mysql -uroot -p123

MariaDB [(none)]> source /root/tomcat-java/tomcat-java-demo/db/tables_ly_tomcat.sql;

MariaDB [test]> show databases;

+--------------------+

| Database           |

+--------------------+

| information_schema |

| mysql              |

| performance_schema |

| test               |

+--------------------+

MariaDB [test]> use test;

Database changed

MariaDB [test]> show tables;

+----------------+

| Tables_in_test |

+----------------+

| user           |

+----------------+

MariaDB [test]> quit      # the user table is empty by design; rows appear once you add data through the web UI

[root@k8s-master tomcat-java]# cd tomcat-java-demo

[root@k8s-master tomcat-java-demo]# ls

db  Dockerfile  LICENSE  pom.xml  README.md  src

[root@k8s-master tomcat-java-demo]# ls src/main/resources/

application.yml  log4j.properties  static  templates

[root@k8s-master tomcat-java-demo]# vim src/main/resources/application.yml    # update the app's MySQL connection settings

server:

  port: 8080

spring:

  datasource:

    url: jdbc:mysql://192.168.171.128:3306/test?characterEncoding=utf-8

    username: root

    password: 123456

    driver-class-name: com.mysql.jdbc.Driver

  freemarker:

    allow-request-override: false

    cache: true

    check-template-location: true

    charset: UTF-8

    content-type: text/html; charset=utf-8

    expose-request-attributes: false

    expose-session-attributes: false

    expose-spring-macro-helpers: false

    suffix: .ftl

    template-loader-path:

      - classpath:/templates/

[root@k8s-master tomcat-java-demo]# cat Dockerfile     # the base image lizhenliang/tomcat is already a runnable tomcat image, previously downloaded and saved locally

FROM lizhenliang/tomcat

LABEL maintainer www.ctnrs.com

RUN rm -rf /usr/local/tomcat/webapps/*

ADD target/*.war /usr/local/tomcat/webapps/ROOT.war   # deploying as ROOT.war serves the app at the root context /

[root@k8s-master tomcat-java-demo]# ls /opt/lizhenliang_tomcat_jdk.tar

/opt/lizhenliang_tomcat_jdk.tar

[root@k8s-master tomcat-java-demo]# docker load -i /opt/lizhenliang_tomcat_jdk.tar

[root@k8s-master tomcat-java-demo]# docker images |grep lizhenliang

lizhenliang/tomcat            latest              143035d83fdc        11 months ago       388MB

[root@k8s-master tomcat-java-demo]# ls

db  Dockerfile  LICENSE  pom.xml  README.md  src

[root@k8s-master tomcat-java-demo]# cat pom.xml |grep war    # confirm the project packages as a war

  <packaging>war</packaging>

      <artifactId>maven-war-plugin</artifactId>

[root@k8s-master tomcat-java-demo]# mvn clean package      # build the war; maven downloads the required build dependencies and compiles the source

......

[INFO] Building war: /root/tomcat-java/tomcat-java-demo/target/ly-simple-tomcat-0.0.1-SNAPSHOT.war

[INFO] ------------------------------------------------------------------------

[INFO] BUILD SUCCESS

[INFO] ------------------------------------------------------------------------

[INFO] Total time: 02:15 min

[INFO] Finished at: 2019-11-24T10:12:50+08:00

[INFO] ------------------------------------------------------------------------

[root@k8s-master tomcat-java-demo]# ls

db  Dockerfile  LICENSE  pom.xml  README.md  src  target

[root@k8s-master tomcat-java-demo]# ls target/

classes  generated-sources  ly-simple-tomcat-0.0.1-SNAPSHOT  ly-simple-tomcat-0.0.1-SNAPSHOT.war  maven-archiver  maven-status

[root@k8s-master tomcat-java-demo]# docker build -t 192.168.171.128:5000/java-demo:1.0 .   # build the project image

[root@k8s-master tomcat-java-demo]# docker images |grep java-demo

192.168.171.128:5000/java-demo   1.0                 d41d6cb2b289        18 seconds ago      406MB

[root@k8s-master tomcat-java-demo]# docker push 192.168.171.128:5000/java-demo:1.0   # push the project image to the registry
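Optionally, smoke-test the image locally before handing it to k8s; a minimal sketch (host port 8888 and the container name java-demo-test are arbitrary choices):

[root@k8s-master tomcat-java-demo]# docker run -d --name java-demo-test -p 8888:8080 192.168.171.128:5000/java-demo:1.0

[root@k8s-master tomcat-java-demo]# curl -I http://127.0.0.1:8888/   # expect HTTP 200 once tomcat has unpacked ROOT.war

[root@k8s-master tomcat-java-demo]# docker rm -f java-demo-test   # clean up the test container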

[root@k8s-master tomcat-java-demo]# cd /root/tomcat-java/

[root@k8s-master tomcat-java]# ls

tomcat-java-demo  tomcat-java-demo.tar.gz

6. Deploy the tomcat app from the project image, mounting a ConfigMap so the tomcat config can be modified later

[root@k8s-master tomcat-java]# rz

Upload java-demo-yaml.tar.gz

[root@k8s-master tomcat-java]# ls

java-demo-yaml.tar.gz  tomcat-java-demo  tomcat-java-demo.tar.gz

[root@k8s-master tomcat-java]# tar -zxf java-demo-yaml.tar.gz

[root@k8s-master tomcat-java]# ls

java-demo-yaml  java-demo-yaml.tar.gz  tomcat-java-demo  tomcat-java-demo.tar.gz

[root@k8s-master tomcat-java]# cd java-demo-yaml

[root@k8s-master java-demo-yaml]# ls

deployment.yaml  namespace.yaml  server.xml  service.yaml

[root@k8s-master java-demo-yaml]# vim server.xml  # the customized tomcat config file, used to create the configmap

<?xml version='1.0' encoding='utf-8'?>

<!--

  Licensed to the Apache Software Foundation (ASF) under one or more

  contributor license agreements.  See the NOTICE file distributed with

  this work for additional information regarding copyright ownership.

  The ASF licenses this file to You under the Apache License, Version 2.0

  (the "License"); you may not use this file except in compliance with

  the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software

  distributed under the License is distributed on an "AS IS" BASIS,

  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

  See the License for the specific language governing permissions and

  limitations under the License.

-->

<!-- Note:  A "Server" is not itself a "Container", so you may not

     define subcomponents such as "Valves" at this level.

     Documentation at /docs/config/server.html

 -->

<Server port="8005" shutdown="SHUTDOWN">

  <Listener className="org.apache.catalina.startup.VersionLoggerListener" />

  <!-- Security listener. Documentation at /docs/config/listeners.html

  <Listener className="org.apache.catalina.security.SecurityListener" />

  -->

  <!--APR library loader. Documentation at /docs/apr.html -->

  <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />

  <!-- Prevent memory leaks due to use of particular java/javax APIs-->

  <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />

  <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />

  <Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />

  <!-- Global JNDI resources

       Documentation at /docs/jndi-resources-howto.html

  -->

  <GlobalNamingResources>

    <!-- Editable user database that can also be used by

         UserDatabaseRealm to authenticate users

    -->

    <Resource name="UserDatabase" auth="Container"

              type="org.apache.catalina.UserDatabase"

              description="User database that can be updated and saved"

              factory="org.apache.catalina.users.MemoryUserDatabaseFactory"

              pathname="conf/tomcat-users.xml" />

  </GlobalNamingResources>

  <!-- A "Service" is a collection of one or more "Connectors" that share

       a single "Container" Note:  A "Service" is not itself a "Container",

       so you may not define subcomponents such as "Valves" at this level.

       Documentation at /docs/config/service.html

   -->

  <Service name="Catalina">

    <!--The connectors can use a shared executor, you can define one or more named thread pools-->

    <!--

    <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"

        maxThreads="150" minSpareThreads="4"/>

    -->

    <!-- A "Connector" represents an endpoint by which requests are received

         and responses are returned. Documentation at :

         Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)

         Java AJP  Connector: /docs/config/ajp.html

         APR (HTTP/AJP) Connector: /docs/apr.html

         Define a non-SSL/TLS HTTP/1.1 Connector on port 8080

    -->

    <Connector port="8080" protocol="HTTP/1.1"

               connectionTimeout="20000"

               redirectPort="8443" />

    <!-- A "Connector" using the shared thread pool-->

    <!--

    <Connector executor="tomcatThreadPool"

               port="8080" protocol="HTTP/1.1"

               connectionTimeout="20000"

               redirectPort="8443" />

    -->

    <!-- Define a SSL/TLS HTTP/1.1 Connector on port 8443

         This connector uses the NIO implementation that requires the JSSE

         style configuration. When using the APR/native implementation, the

         OpenSSL style configuration is required as described in the APR/native

         documentation -->

    <!--

    <Connector port="8443" protocol="org.apache.coyote.http11.Http11NioProtocol"

               maxThreads="150" SSLEnabled="true" scheme="https" secure="true"

               clientAuth="false" sslProtocol="TLS" />

    -->

    <!-- Define an AJP 1.3 Connector on port 8009 -->

    <Connector port="8009" protocol="AJP/1.3" redirectPort="8443" />

    <!-- An Engine represents the entry point (within Catalina) that processes

         every request.  The Engine implementation for Tomcat stand alone

         analyzes the HTTP headers included with the request, and passes them

         on to the appropriate Host (virtual host).

         Documentation at /docs/config/engine.html -->

    <!-- You should set jvmRoute to support load-balancing via AJP ie :

    <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">

    -->

    <Engine name="Catalina" defaultHost="localhost">

      <!--For clustering, please take a look at documentation at:

          /docs/cluster-howto.html  (simple how to)

          /docs/config/cluster.html (reference documentation) -->

      <!--

      <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>

      -->

      <!-- Use the LockOutRealm to prevent attempts to guess user passwords

           via a brute-force attack -->

      <Realm className="org.apache.catalina.realm.LockOutRealm">

        <!-- This Realm uses the UserDatabase configured in the global JNDI

             resources under the key "UserDatabase".  Any edits

             that are performed against this UserDatabase are immediately

             available for use by the Realm.  -->

        <Realm className="org.apache.catalina.realm.UserDatabaseRealm"

               resourceName="UserDatabase"/>

      </Realm>

      <Host name="localhost"  appBase="webapps"

            unpackWARs="true" autoDeploy="true">

        <!-- SingleSignOn valve, share authentication between web applications

             Documentation at: /docs/config/valve.html -->

        <!--

        <Valve className="org.apache.catalina.authenticator.SingleSignOn" />

        -->

        <!-- Access log processes all example.

             Documentation at: /docs/config/valve.html

             Note: The pattern used is equivalent to using pattern="common" -->

        <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"

               prefix="localhost_access_log" suffix=".txt"

               pattern="%h %l %u %t "%r" %s %b" />

      </Host>

    </Engine>

  </Service>

</Server>

Create a configmap from the custom config file, for the app pods to mount:

[root@k8s-master java-demo-yaml]# kubectl create configmap tomcat-configmap --from-file=./server.xml -n test

[root@k8s-master java-demo-yaml]# kubectl get configmap -n test

NAME               DATA   AGE

tomcat-configmap   1      21s

[root@k8s-master java-demo-yaml]# kubectl describe configmap tomcat-configmap -n test  # inspect the configmap; its content matches the server.xml above

........

[root@k8s-master java-demo-yaml]# ls

deployment.yaml  namespace.yaml  server.xml  service.yaml

[root@k8s-master java-demo-yaml]# cat namespace.yaml

apiVersion: v1

kind: Namespace

metadata:

  name: test

[root@k8s-master java-demo-yaml]# cat service.yaml

apiVersion: v1

kind: Service

metadata:

  name: tomcat-java-demo

  namespace: test

spec:

  selector:

    project: www

    app: java-demo

  ports:

  - name: web

    port: 80

    targetPort: 8080

  type: NodePort

[root@k8s-master java-demo-yaml]# cat deployment.yaml

apiVersion: apps/v1

kind: Deployment

metadata:

  name: tomcat-java-demo

  namespace: test

spec:

  replicas: 3

  selector:

    matchLabels:

      project: www

      app: java-demo

  template:

    metadata:

      labels:

        project: www

        app: java-demo

    spec:

      containers:

      - name: tomcat

        image: 192.168.171.128:5000/java-demo:1.0

        imagePullPolicy: IfNotPresent

        ports:

        - containerPort: 8080

          name: web

          protocol: TCP

        resources:

          requests:

            cpu: 0.5

            memory: 1Gi

          limits:

            cpu: 1

            memory: 2Gi

        livenessProbe:

          httpGet:

            path: /

            port: 8080

          initialDelaySeconds: 60

          timeoutSeconds: 20

        readinessProbe:

          httpGet:

            path: /

            port: 8080

          initialDelaySeconds: 60

          timeoutSeconds: 20

        # the configmap is mounted below

        volumeMounts:

        - name: httpconfigmap-volume

          mountPath: /usr/local/tomcat/conf/server.xml

          subPath: path/to/server.xml

      volumes:

      - name: httpconfigmap-volume

        configMap:

          name: tomcat-configmap

          defaultMode: 0777

          items:

          - key: server.xml

            path: path/to/server.xml

[root@k8s-master java-demo-yaml]# kubectl apply -f namespace.yaml   # create the test namespace

[root@k8s-master java-demo-yaml]# kubectl get ns

NAME                   STATUS   AGE

default                Active   18d

kube-node-lease        Active   18d

kube-public            Active   18d

kube-system            Active   18d

kubernetes-dashboard   Active   18d

test                   Active   5s

[root@k8s-master java-demo-yaml]# kubectl apply -f deployment.yaml   # deploy the java app on k8s

[root@k8s-master java-demo-yaml]# kubectl get pod -n test -o wide

NAME                         READY   STATUS    RESTARTS   AGE   IP            NODE        NOMINATED NODE   READINESS GATES

tomcat-java-demo-78b6599cc6-75zkd   1/1     Running   0          72s   10.244.1.30   k8s-node2   <none>           <none>

tomcat-java-demo-78b6599cc6-ttfhh   1/1     Running   0          72s   10.244.0.19   k8s-node1   <none>           <none>

tomcat-java-demo-78b6599cc6-zgw9j   1/1     Running   0          72s   10.244.1.31   k8s-node2   <none>           <none>
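To verify the configmap really landed inside the containers, a hedged check (the pod name is taken from the listing above; this assumes head is available in the image):

[root@k8s-master java-demo-yaml]# kubectl exec -n test tomcat-java-demo-78b6599cc6-75zkd -- head -3 /usr/local/tomcat/conf/server.xml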

[root@k8s-master java-demo-yaml]# kubectl apply -f service.yaml      # create the service for the app pods

[root@k8s-master java-demo-yaml]# kubectl get svc -n test

NAME               TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE

tomcat-java-demo   NodePort   10.0.0.60    <none>        80:31645/TCP   46s

[root@k8s-master java-demo-yaml]# cd ..

[root@k8s-master tomcat-java]# ls

java-demo-yaml  java-demo-yaml.tar.gz  tomcat-java-demo  tomcat-java-demo.tar.gz

7. The app is now reachable through the NodePort, as shown below:
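For example, a hedged curl against any node, using the NodePort 31645 allocated in the svc output above:

[root@k8s-master tomcat-java]# curl -I http://192.168.171.129:31645/   # any node IP works; expect HTTP 200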

8. Publish the site through a domain name by deploying the ingress-controller and an Ingress:

[root@k8s-master tomcat-java]# ls

java-demo-yaml  java-demo-yaml.tar.gz  tomcat-java-demo  tomcat-java-demo.tar.gz

[root@k8s-master tomcat-java]# rz

Upload the ingress-nginx image and yaml files

[root@k8s-master tomcat-java]# ls

java-demo-yaml  java-demo-yaml.tar.gz  mandatory-ingress_yua_ban.yaml  nginx-ingress-controller.tar  tomcat-java-demo  tomcat-java-demo.tar.gz

Deploy the ingress-controller:

[root@k8s-master tomcat-java]# docker load -i nginx-ingress-controller.tar     # load the ingress-controller image

[root@k8s-master tomcat-java]# docker images |grep ingress

quay.io/kubernetes-ingress-controller/nginx-ingress-controller   0.24.1              98675eb54d0e        7 months ago        631MB

[root@k8s-master tomcat-java]# docker tag quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.24.1 192.168.171.128:5000/ingress-controller:1.0

[root@k8s-master tomcat-java]# docker images |grep ingress

192.168.171.128:5000/ingress-controller                          1.0                 98675eb54d0e        7 months ago        631MB

quay.io/kubernetes-ingress-controller/nginx-ingress-controller   0.24.1              98675eb54d0e        7 months ago        631MB

[root@k8s-master tomcat-java]# docker push 192.168.171.128:5000/ingress-controller:1.0  # push to the registry

[root@k8s-master tomcat-java]# vim mandatory-ingress_yua_ban.yaml    # edit the ingress-controller yaml as annotated below

apiVersion: v1

kind: Namespace

metadata:

  name: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap

apiVersion: v1

metadata:

  name: nginx-configuration

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap

apiVersion: v1

metadata:

  name: tcp-services

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap

apiVersion: v1

metadata:

  name: udp-services

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

---

apiVersion: v1

kind: ServiceAccount

metadata:

  name: nginx-ingress-serviceaccount

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

---

apiVersion: rbac.authorization.k8s.io/v1beta1

kind: ClusterRole

metadata:

  name: nginx-ingress-clusterrole

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

rules:

  - apiGroups:

      - ""

    resources:

      - configmaps

      - endpoints

      - nodes

      - pods

      - secrets

    verbs:

      - list

      - watch

  - apiGroups:

      - ""

    resources:

      - nodes

    verbs:

      - get

  - apiGroups:

      - ""

    resources:

      - services

    verbs:

      - get

      - list

      - watch

  - apiGroups:

      - "extensions"

    resources:

      - ingresses

    verbs:

      - get

      - list

      - watch

  - apiGroups:

      - ""

    resources:

      - events

    verbs:

      - create

      - patch

  - apiGroups:

      - "extensions"

    resources:

      - ingresses/status

    verbs:

      - update

---

apiVersion: rbac.authorization.k8s.io/v1beta1

kind: Role

metadata:

  name: nginx-ingress-role

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

rules:

  - apiGroups:

      - ""

    resources:

      - configmaps

      - pods

      - secrets

      - namespaces

    verbs:

      - get

  - apiGroups:

      - ""

    resources:

      - configmaps

    resourceNames:

      # Defaults to "<election-id>-<ingress-class>"

      # Here: "<ingress-controller-leader>-<nginx>"

      # This has to be adapted if you change either parameter

      # when launching the nginx-ingress-controller.

      - "ingress-controller-leader-nginx"

    verbs:

      - get

      - update

  - apiGroups:

      - ""

    resources:

      - configmaps

    verbs:

      - create

  - apiGroups:

      - ""

    resources:

      - endpoints

    verbs:

      - get

---

apiVersion: rbac.authorization.k8s.io/v1beta1

kind: RoleBinding

metadata:

  name: nginx-ingress-role-nisa-binding

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: Role

  name: nginx-ingress-role

subjects:

  - kind: ServiceAccount

    name: nginx-ingress-serviceaccount

    namespace: ingress-nginx

---

apiVersion: rbac.authorization.k8s.io/v1beta1

kind: ClusterRoleBinding

metadata:

  name: nginx-ingress-clusterrole-nisa-binding

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: nginx-ingress-clusterrole

subjects:

  - kind: ServiceAccount

    name: nginx-ingress-serviceaccount

    namespace: ingress-nginx

---

apiVersion: apps/v1

kind: DaemonSet

metadata:

  name: nginx-ingress-controller

  namespace: ingress-nginx

  labels:

    app.kubernetes.io/name: ingress-nginx

    app.kubernetes.io/part-of: ingress-nginx

spec:

  #replicas: 1  # commented out: a daemonset runs one pod on every node by default, so no replica count is needed

  selector:

    matchLabels:

      app.kubernetes.io/name: ingress-nginx

      app.kubernetes.io/part-of: ingress-nginx

  template:

    metadata:

      labels:

        app.kubernetes.io/name: ingress-nginx

        app.kubernetes.io/part-of: ingress-nginx

      annotations:

        prometheus.io/port: "10254"

        prometheus.io/scrape: "true"

    spec:

      hostNetwork: true   # added: use the host's network, so each pod shares its node's network

      serviceAccountName: nginx-ingress-serviceaccount

      containers:

        - name: nginx-ingress-controller

          #image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.24.1

          image: 192.168.171.128:5000/ingress-controller:1.0  # changed to the private registry image

          imagePullPolicy: IfNotPresent    # added: avoids Always pulling from the registry; the image can be pre-loaded locally

          args:

            - /nginx-ingress-controller

            - --configmap=$(POD_NAMESPACE)/nginx-configuration

            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services

            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services

            - --publish-service=$(POD_NAMESPACE)/ingress-nginx

            - --annotations-prefix=nginx.ingress.kubernetes.io

          securityContext:

            allowPrivilegeEscalation: true

            capabilities:

              drop:

                - ALL

              add:

                - NET_BIND_SERVICE

            # www-data -> 33

            runAsUser: 33

          env:

            - name: POD_NAME

              valueFrom:

                fieldRef:

                  fieldPath: metadata.name

            - name: POD_NAMESPACE

              valueFrom:

                fieldRef:

                  fieldPath: metadata.namespace

          ports:

            - name: http

              containerPort: 80 # container port of the ingress-nginx controller

              hostPort: 80     # added: expose the container port on the physical node; make sure no node process already uses port 80

            - name: https

              containerPort: 443

              hostPort: 443     # added: expose the container port on the physical node; make sure no node process already uses port 443

          livenessProbe:

            failureThreshold: 3

            httpGet:

              path: /healthz

              port: 10254

              scheme: HTTP

            initialDelaySeconds: 10

            periodSeconds: 10

            successThreshold: 1

            timeoutSeconds: 10

          readinessProbe:

            failureThreshold: 3

            httpGet:

              path: /healthz

              port: 10254

              scheme: HTTP

            periodSeconds: 10

            successThreshold: 1

            timeoutSeconds: 10

---
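Now apply the edited manifest to deploy the controller:

[root@k8s-master tomcat-java]# kubectl apply -f mandatory-ingress_yua_ban.yaml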

[root@k8s-master tomcat-java]# kubectl get pod -n ingress-nginx -o wide

NAME                   READY   STATUS    RESTARTS   AGE     IP                NODE        NOMINATED NODE   READINESS GATES

nginx-ingress-controller-4n9vk   1/1     Running   0          3m41s   192.168.171.129   k8s-node1   <none>           <none>

nginx-ingress-controller-lp7k2   1/1     Running   0          3m41s   192.168.171.130   k8s-node2   <none>           <none>
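Because of hostNetwork and hostPort, each node now listens on port 80. Until an Ingress rule exists, the controller answers with its default backend, which a hedged curl can confirm:

[root@k8s-master tomcat-java]# curl -I http://192.168.171.129/   # expect HTTP 404 from the default backend for now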

9. Deploy the Ingress to inject the custom routing rule into the ingress-controller:

[root@k8s-master tomcat-java]# rz

Upload the ingress yaml file

[root@k8s-master tomcat-java]# ls

ingress.yaml  java-demo-yaml  java-demo-yaml.tar.gz  mandatory-ingress_yua_ban.yaml  nginx-ingress-controller.tar  tomcat-java-demo  tomcat-java-demo.tar.gz

[root@k8s-master tomcat-java]# cat ingress.yaml

apiVersion: extensions/v1beta1

kind: Ingress

metadata:

  name: tomcat-java-demo

  namespace: test

spec:

  rules:

    - host: java.ctnrs.com    # the domain name clients will use

      http:

        paths:

        - path: /

          backend:

            serviceName: tomcat-java-demo    # the backend service name

            servicePort: 80                  # the service's in-cluster port

[root@k8s-master tomcat-java]# kubectl apply -f ingress.yaml    # create the ingress

[root@k8s-master tomcat-java]# kubectl get ingress -n test

NAME               HOSTS            ADDRESS   PORTS   AGE

tomcat-java-demo   java.ctnrs.com             80      44s

[root@k8s-master tomcat-java]# vim /etc/hosts

192.168.171.128 k8s-master

192.168.171.129 k8s-node1 java.ctnrs.com

192.168.171.130 k8s-node2 java.ctnrs.com

[root@k8s-master tomcat-java]# curl java.ctnrs.com  # access the site by its domain name

The site responds.
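An equivalent check that skips /etc/hosts is to send the Host header directly to a node running the controller (a hedged example):

[root@k8s-master tomcat-java]# curl -I -H 'Host: java.ctnrs.com' http://192.168.171.129/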

10. Browser access: java.ctnrs.com (note that Windows also needs a matching hosts entry)

11. Modify the tomcat config inside the pods by editing the configmap content:

[root@k8s-master tomcat-java]# kubectl get configmap -n test

NAME               DATA   AGE

tomcat-configmap   1      118m

[root@k8s-master tomcat-java]# kubectl edit configmap tomcat-configmap -n test

 <Host name="localhost"  appBase="/usr/local/"    # change the appBase (site directory)

Note: the pods have to be deleted; only the automatically recreated pods apply the new config:

#kubectl delete pod xxx

#kubectl delete pod yyy

#kubectl delete pod zzz

Exec into one of the recreated pods and you can see that server.xml has been updated. Because the new appBase has no site content, the site is unreachable, the health checks fail, and the pods keep restarting. Edit the configmap back to the original value, delete the pods again, and after they restart the service returns to normal.
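Rather than deleting the pods one by one, kubectl on 1.15+ can also roll the whole deployment in a single command (a hedged alternative):

[root@k8s-master tomcat-java]# kubectl rollout restart deployment tomcat-java-demo -n test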
