Kubernetes Binary Cluster Deployment

This document walks through a manual binary deployment of a Kubernetes (k8s) cluster: installing dependencies, configuring hosts, setting up passwordless SSH, disabling the firewall and SELinux, installing etcd, deploying the Master node, configuring Flannel, creating the kubelet and kube-proxy configurations, and installing the Dashboard and Heapster monitoring add-ons. A simple nginx deployment serves as a smoke test at the end.

* Install wget, vim, and rsync

```bash

yum install -y wget vim rsync

```

* Configure hosts

```bash

vim /etc/hosts

192.168.0.120 node120

192.168.0.121 node121

192.168.0.122 node122

```

* Set up passwordless SSH login

```bash

ssh-keygen

ssh-copy-id node120

ssh-copy-id node121

ssh-copy-id node122

```

* Sync the hosts file to all nodes

```bash

rsync /etc/hosts root@node120:/etc/hosts

rsync /etc/hosts root@node121:/etc/hosts

rsync /etc/hosts root@node122:/etc/hosts

```
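
Passwordless SSH also makes it easy to sanity-check all three machines at once; a minimal sketch, assuming the hostnames above:

```bash
# confirm SSH access and the synced hosts file on every node
for h in node120 node121 node122; do
  ssh root@$h "hostname && grep -c 'node12' /etc/hosts"
done
```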

* Disable the firewall

```bash

systemctl stop firewalld

systemctl disable firewalld

```

* Disable SELinux

```bash

vim /etc/selinux/config

# change SELINUX=enforcing to SELINUX=disabled

setenforce 0

```

* Disable swap

```bash

swapoff -a

vim /etc/fstab # comment out the swap entry so it is not remounted at boot

#/dev/mapper/centos-swap swap swap defaults 0 0

```
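
To confirm swap is really off before continuing (the kubelet refuses to start with swap enabled unless failSwapOn is turned off):

```bash
swapon -s   # should print no entries
free -h     # the Swap line should show all zeros
```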

* Install cfssl

```bash

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64

mv cfssl_linux-amd64 /usr/local/bin/cfssl

mv cfssljson_linux-amd64 /usr/local/bin/cfssljson

mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

```

* Generate certificates

```bash

 

mkdir /k8s/kubernetes/{bin,cfg,ssl} -p

 

mkdir -p /k8s/kubernetes/etcd/data

 

cd /k8s/kubernetes/ssl/

 

vim ca-config.json

{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "876000h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}

 

vim ca-csr.json

{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

 

cfssl gencert -initca ca-csr.json | cfssljson -bare ca

 

vim etcd-csr.json

{
  "CN": "etcd",
  "hosts": [
    "node120",
    "node121",
    "node122",
    "192.168.0.120",
    "192.168.0.121",
    "192.168.0.122"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ]
}

 

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \

-profile=kubernetes etcd-csr.json | cfssljson -bare etcd

 

vim kubernetes-csr.json

{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.0.120",
    "192.168.0.121",
    "192.168.0.122",
    "node120",
    "node121",
    "node122",
    "10.254.0.1",
    "10.0.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \

-profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

 

vim kube-proxy-csr.json

{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

 

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \

-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy


 

```
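
Before copying the certificates around, it is worth verifying what was generated; cfssl-certinfo can dump a certificate's hosts (SANs) and validity period, for example:

```bash
ls *.pem                              # ca, etcd, kubernetes, and kube-proxy key pairs
cfssl-certinfo -cert kubernetes.pem   # check the SAN list and the expiry date
```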

* Install etcd

```bash

 

mkdir /opt/soft && cd /opt/soft

 

wget https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz

 

tar -xvzf etcd-v3.3.10-linux-amd64.tar.gz

 

cp etcd-v3.3.10-linux-amd64/etc* /k8s/kubernetes/bin/

 

chmod +x -R /k8s/kubernetes/bin/

 

vim /k8s/kubernetes/cfg/etcd.conf

 

#[Member]

ETCD_NAME="etcd00"

ETCD_DATA_DIR="/k8s/kubernetes/etcd/data"

ETCD_LISTEN_PEER_URLS="https://192.168.0.120:2380"

ETCD_LISTEN_CLIENT_URLS="https://192.168.0.120:2379"

#[Clustering]

ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.120:2380"

ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.120:2379"

ETCD_INITIAL_CLUSTER="etcd00=https://192.168.0.120:2380,etcd01=https://192.168.0.121:2380,etcd02=https://192.168.0.122:2380"

ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"

ETCD_INITIAL_CLUSTER_STATE="new"

#[Security]

ETCD_CERT_FILE="/k8s/kubernetes/ssl/etcd.pem"

ETCD_KEY_FILE="/k8s/kubernetes/ssl/etcd-key.pem"

ETCD_TRUSTED_CA_FILE="/k8s/kubernetes/ssl/ca.pem"

ETCD_CLIENT_CERT_AUTH="true"

ETCD_PEER_CERT_FILE="/k8s/kubernetes/ssl/etcd.pem"

ETCD_PEER_KEY_FILE="/k8s/kubernetes/ssl/etcd-key.pem"

ETCD_PEER_TRUSTED_CA_FILE="/k8s/kubernetes/ssl/ca.pem"

ETCD_PEER_CLIENT_CERT_AUTH="true"

 

vim /usr/lib/systemd/system/etcd.service

 

[Unit]

Description=Etcd Server

After=network.target

After=network-online.target

Wants=network-online.target

[Service]

Type=notify

WorkingDirectory=/k8s/kubernetes/etcd/data

EnvironmentFile=/k8s/kubernetes/cfg/etcd.conf

ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /k8s/kubernetes/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" --listen-peer-urls=\"${ETCD_LISTEN_PEER_URLS}\" --advertise-client-urls=\"${ETCD_ADVERTISE_CLIENT_URLS}\" --initial-cluster-token=\"${ETCD_INITIAL_CLUSTER_TOKEN}\" --initial-cluster=\"${ETCD_INITIAL_CLUSTER}\" --initial-cluster-state=\"${ETCD_INITIAL_CLUSTER_STATE}\" --cert-file=\"${ETCD_CERT_FILE}\" --key-file=\"${ETCD_KEY_FILE}\" --trusted-ca-file=\"${ETCD_TRUSTED_CA_FILE}\" --client-cert-auth=\"${ETCD_CLIENT_CERT_AUTH}\" --peer-cert-file=\"${ETCD_PEER_CERT_FILE}\" --peer-key-file=\"${ETCD_PEER_KEY_FILE}\" --peer-trusted-ca-file=\"${ETCD_PEER_TRUSTED_CA_FILE}\" --peer-client-cert-auth=\"${ETCD_PEER_CLIENT_CERT_AUTH}\""

Restart=on-failure

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

 

rsync /usr/lib/systemd/system/etcd.service root@node121:/usr/lib/systemd/system/etcd.service

 

rsync /usr/lib/systemd/system/etcd.service root@node122:/usr/lib/systemd/system/etcd.service

 

rsync -avz /k8s/ root@node121:/k8s/

rsync -avz /k8s/ root@node122:/k8s/

# on node121/node122, edit /k8s/kubernetes/cfg/etcd.conf: set ETCD_NAME to etcd01/etcd02
# and change the listen/advertise URLs to that node's own IP, then run the systemctl
# steps below on all three nodes (the first member waits until its peers join)

 

systemctl daemon-reload

 

systemctl enable etcd

 

systemctl start etcd

 

systemctl status etcd

 

/k8s/kubernetes/bin/etcdctl --ca-file=/k8s/kubernetes/ssl/ca.pem --cert-file=/k8s/kubernetes/ssl/etcd.pem --key-file=/k8s/kubernetes/ssl/etcd-key.pem --endpoints="https://192.168.0.120:2379,https://192.168.0.121:2379,https://192.168.0.122:2379" cluster-health

 

```
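
In addition to cluster-health, listing the members verifies that all three peers joined with the expected URLs; same TLS flags as above:

```bash
/k8s/kubernetes/bin/etcdctl \
  --ca-file=/k8s/kubernetes/ssl/ca.pem \
  --cert-file=/k8s/kubernetes/ssl/etcd.pem \
  --key-file=/k8s/kubernetes/ssl/etcd-key.pem \
  --endpoints="https://192.168.0.120:2379" \
  member list
```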

* Deploy the Master node

```bash

 

cd /opt/soft

 

# download kubernetes-server-linux-amd64.tar.gz from the Kubernetes release page first
tar -xvf kubernetes-server-linux-amd64.tar.gz

 

cd kubernetes/server/bin/

 

cp kube-scheduler kube-apiserver kube-controller-manager kubectl /k8s/kubernetes/bin/

 

# Create the TLS Bootstrapping Token

 

head -c 16 /dev/urandom | od -An -t x | tr -d ' '

398b9553204677e0ff27cda174af0c4c

 

vim /k8s/kubernetes/cfg/token.csv

 

398b9553204677e0ff27cda174af0c4c,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

 

vim /k8s/kubernetes/cfg/kube-apiserver

 

KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.0.120:2379,https://192.168.0.121:2379,https://192.168.0.122:2379 \
--bind-address=192.168.0.120 \
--secure-port=6443 \
--advertise-address=192.168.0.120 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/k8s/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/k8s/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/k8s/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/k8s/kubernetes/ssl/ca.pem \
--service-account-key-file=/k8s/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/k8s/kubernetes/ssl/ca.pem \
--etcd-certfile=/k8s/kubernetes/ssl/etcd.pem \
--etcd-keyfile=/k8s/kubernetes/ssl/etcd-key.pem"

 

vim /usr/lib/systemd/system/kube-apiserver.service

 

[Unit]

Description=Kubernetes API Server

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=/k8s/kubernetes/cfg/kube-apiserver

ExecStart=/k8s/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS

Restart=on-failure

[Install]

WantedBy=multi-user.target

 

systemctl daemon-reload

 

systemctl enable kube-apiserver

 

systemctl restart kube-apiserver

 

systemctl status kube-apiserver

 

vim /k8s/kubernetes/cfg/kube-scheduler

 

KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect"

 

# --address: listen on 127.0.0.1:10251 for http /metrics requests; kube-scheduler does not yet serve https
# --kubeconfig: path to the kubeconfig file kube-scheduler uses to connect to and authenticate against kube-apiserver
# --leader-elect=true: enable leader election; the elected leader handles the work while the others block

 

vim /usr/lib/systemd/system/kube-scheduler.service

 

[Unit]

Description=Kubernetes Scheduler

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=-/k8s/kubernetes/cfg/kube-scheduler

ExecStart=/k8s/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS

Restart=on-failure

[Install]

WantedBy=multi-user.target

 

systemctl daemon-reload

 

systemctl enable kube-scheduler

 

systemctl restart kube-scheduler

 

systemctl status kube-scheduler

 

vim /k8s/kubernetes/cfg/kube-controller-manager

 

KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=127.0.0.1 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/k8s/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/k8s/kubernetes/ssl/ca-key.pem \
--root-ca-file=/k8s/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/k8s/kubernetes/ssl/ca-key.pem"

 

vim /usr/lib/systemd/system/kube-controller-manager.service

 

[Unit]

Description=Kubernetes Controller Manager

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=/k8s/kubernetes/cfg/kube-controller-manager

ExecStart=/k8s/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS

Restart=on-failure

[Install]

WantedBy=multi-user.target

 

systemctl daemon-reload

 

systemctl enable kube-controller-manager

 

systemctl restart kube-controller-manager

 

systemctl status kube-controller-manager

 

vim /etc/profile

export PATH=/k8s/kubernetes/bin:$PATH

 

source /etc/profile

 

# check the status of the master components

kubectl get cs,nodes
NAME                                 STATUS    MESSAGE             ERROR
componentstatus/scheduler            Healthy   ok
componentstatus/etcd-2               Healthy   {"health":"true"}
componentstatus/etcd-1               Healthy   {"health":"true"}
componentstatus/etcd-0               Healthy   {"health":"true"}
componentstatus/controller-manager   Healthy   ok

 

```
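
If any componentstatus shows unhealthy, checking the three systemd units and their recent logs is the quickest way to find the cause; a small sketch:

```bash
for s in kube-apiserver kube-scheduler kube-controller-manager; do
  echo "== $s: $(systemctl is-active $s)"
  journalctl -u $s -n 20 --no-pager   # last 20 log lines per unit
done
```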

 

* Deploy the node components (run these steps on each node)

 

* Install Docker

```bash

yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-engine

yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager --add-repo https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo

# official upstream repo

# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

 

yum-config-manager --enable docker-ce-edge

 

yum makecache fast

 

yum install -y docker-ce

 

systemctl enable docker

 

systemctl start docker

 

systemctl status docker

 

usermod -aG docker $USER

 

vim /etc/sysctl.conf

 

net.bridge.bridge-nf-call-ip6tables = 1

net.bridge.bridge-nf-call-iptables = 1

sysctl -p

 

```
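
The kubelet configuration later in this guide sets cgroupDriver: cgroupfs, and Docker must report the same driver; a quick check after installation:

```bash
# should print "cgroupfs" to match the kubelet's cgroupDriver setting
docker info --format '{{.CgroupDriver}}'
```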

* Install Flannel

```bash

 

# The Pod CIDR ${CLUSTER_CIDR} written here must be a /16 range and must match the --cluster-cidr
# value passed to kube-controller-manager (note: the 10.0.0.0/24 below also overlaps the service
# CIDR configured earlier; use a distinct Pod range in production)

/k8s/kubernetes/bin/etcdctl --ca-file=/k8s/kubernetes/ssl/ca.pem --cert-file=/k8s/kubernetes/ssl/etcd.pem --key-file=/k8s/kubernetes/ssl/etcd-key.pem --endpoints="https://192.168.0.120:2379,https://192.168.0.121:2379,https://192.168.0.122:2379" set /coreos.com/network/config '{ "Network": "10.0.0.0/24", "Backend": {"Type": "vxlan"}}'

cd /opt/soft

 

tar -xvf flannel-v0.10.0-linux-amd64.tar.gz

mv flanneld mk-docker-opts.sh /k8s/kubernetes/bin/ && rm -rf README.md

 

vim /k8s/kubernetes/cfg/flanneld

 

FLANNEL_OPTIONS="--etcd-cafile=/k8s/kubernetes/ssl/ca.pem --etcd-certfile=/k8s/kubernetes/ssl/etcd.pem --etcd-keyfile=/k8s/kubernetes/ssl/etcd-key.pem --etcd-endpoints=https://192.168.0.120:2379,https://192.168.0.121:2379,https://192.168.0.122:2379"

 

vim /usr/lib/systemd/system/flanneld.service

 

[Unit]

Description=Flanneld overlay address etcd agent

After=network-online.target network.target

Before=docker.service

[Service]

Type=notify

EnvironmentFile=/k8s/kubernetes/cfg/flanneld

ExecStart=/k8s/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS

ExecStartPost=/k8s/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env

Restart=on-failure

[Install]

WantedBy=multi-user.target

 

vim /usr/lib/systemd/system/docker.service

# add this line under [Service]
EnvironmentFile=/run/flannel/subnet.env
# change ExecStart so dockerd picks up the Flannel-generated options
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock

 

systemctl daemon-reload

 

systemctl start flanneld

 

systemctl enable flanneld

 

systemctl restart docker

 

ip addr

 

```
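
To verify that Flannel handed its subnet to Docker, check the generated environment file and confirm docker0 falls inside the flannel.1 range:

```bash
cat /run/flannel/subnet.env   # the options mk-docker-opts.sh generated for dockerd
ip -4 addr show flannel.1     # the overlay interface
ip -4 addr show docker0       # should sit inside the same subnet
```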


 

* Deploy kubelet and kube-proxy

```bash

 

cd /opt/soft

 

tar -xvzf kubernetes-node-linux-amd64.tar.gz

 

cp kubernetes/node/bin/* /k8s/kubernetes/bin

 

chmod +x /k8s/kubernetes/bin/* && cd /k8s/kubernetes/cfg

 

vim environment.sh

 

#!/bin/bash
# Create the kubelet bootstrapping kubeconfig; the token must match /k8s/kubernetes/cfg/token.csv on the Master
BOOTSTRAP_TOKEN=398b9553204677e0ff27cda174af0c4c
KUBE_APISERVER="https://192.168.0.120:6443"
# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/k8s/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
#----------------------
# Create the kube-proxy kubeconfig
kubectl config set-cluster kubernetes \
  --certificate-authority=/k8s/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
  --client-certificate=/k8s/kubernetes/ssl/kube-proxy.pem \
  --client-key=/k8s/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

 

chmod +x environment.sh

 

./environment.sh

 

scp bootstrap.kubeconfig kube-proxy.kubeconfig root@node121:/k8s/kubernetes/cfg/

 

scp bootstrap.kubeconfig kube-proxy.kubeconfig root@node122:/k8s/kubernetes/cfg/

 

vim /k8s/kubernetes/cfg/kubelet.config

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
# set to the node's own IP: node121 -> 192.168.0.121, node122 -> 192.168.0.122
address: 192.168.0.121
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["10.0.0.1"]
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true

 

vim /k8s/kubernetes/cfg/kubelet

# set to the node's own IP: node121 -> 192.168.0.121, node122 -> 192.168.0.122
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.0.121 \
--kubeconfig=/k8s/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/k8s/kubernetes/cfg/bootstrap.kubeconfig \
--config=/k8s/kubernetes/cfg/kubelet.config \
--cert-dir=/k8s/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

 

vim /usr/lib/systemd/system/kubelet.service

 

[Unit]

Description=Kubernetes Kubelet

After=docker.service

Requires=docker.service

[Service]

EnvironmentFile=/k8s/kubernetes/cfg/kubelet

ExecStart=/k8s/kubernetes/bin/kubelet $KUBELET_OPTS

Restart=on-failure

KillMode=process

[Install]

WantedBy=multi-user.target

 

### Run this on the Master
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

 

systemctl daemon-reload

 

systemctl enable kubelet

 

systemctl restart kubelet

 

systemctl status kubelet

 

yum install -y ipvsadm ipset conntrack

 

vim /k8s/kubernetes/cfg/kube-proxy

# note the address: node121 192.168.0.121, node122 192.168.0.122
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--bind-address=192.168.0.121 \
--masquerade-all \
--feature-gates=SupportIPVSProxyMode=true \
--proxy-mode=ipvs \
--ipvs-min-sync-period=5s \
--ipvs-sync-period=5s \
--ipvs-scheduler=rr \
--hostname-override=192.168.0.121 \
--cluster-cidr=10.0.0.0/24 \
--kubeconfig=/k8s/kubernetes/cfg/kube-proxy.kubeconfig"

 

vim /usr/lib/systemd/system/kube-proxy.service

 

[Unit]

Description=Kubernetes Proxy

After=network.target

[Service]

EnvironmentFile=/k8s/kubernetes/cfg/kube-proxy

ExecStart=/k8s/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS

Restart=on-failure

[Install]

WantedBy=multi-user.target

 

systemctl daemon-reload

 

systemctl enable kube-proxy

 

systemctl start kube-proxy

 

```
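
Because kube-proxy runs in ipvs mode here, the rules it programs can be inspected directly on a node; a quick check:

```bash
lsmod | grep ip_vs   # the IPVS kernel modules kube-proxy relies on
ipvsadm -Ln          # list virtual servers and their backends
```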

 

```bash

 

# run on the Master
kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-An1VRgJ7FEMMF_uyy6iPjyF5ahuLx6tJMbk2SMthwLs   39m   kubelet-bootstrap   Pending

# approve the pending certificate signing request
kubectl certificate approve node-csr-An1VRgJ7FEMMF_uyy6iPjyF5ahuLx6tJMbk2SMthwLs

kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-An1VRgJ7FEMMF_uyy6iPjyF5ahuLx6tJMbk2SMthwLs   41m   kubelet-bootstrap   Approved,Issued

# the node should now register and become Ready
kubectl get nodes
NAME        STATUS   ROLES    AGE   VERSION
node-xxxx   Ready    <none>   12s   v1.14.0

 

```
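
Approving requests one by one gets tedious as nodes are added; assuming every pending CSR on this private network is expected, they can be approved in one pass:

```bash
# approve all outstanding kubelet CSRs at once
kubectl get csr -o name | xargs -r kubectl certificate approve
```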

 

* Kube-dns addon

 

```bash

export ADDON_URL="https://kairen.github.io/files/manual-v1.8/addon"

wget "${ADDON_URL}/kube-dns.yml.conf" -O kube-dns.yml

kubectl apply -f kube-dns.yml

kubectl -n kube-system get po -l k8s-app=kube-dns

NAME                        READY   STATUS    RESTARTS   AGE
kube-dns-6cb549f55f-h4zr5   0/3     Pending   0          40s

```
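
Once the kube-dns pod leaves Pending, in-cluster name resolution can be smoke-tested from a throwaway pod; a sketch using busybox (1.28, whose nslookup behaves well):

```bash
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- \
  nslookup kubernetes.default
```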

 

* Dashboard addon

 

```bash

export ADDON_URL="https://kairen.github.io/files/manual-v1.8/addon"

wget ${ADDON_URL}/kube-dashboard.yml.conf -O kube-dashboard.yml

kubectl apply -f kube-dashboard.yml

kubectl -n kube-system get po,svc -l k8s-app=kubernetes-dashboard

NAME                                      READY   STATUS    RESTARTS   AGE
po/kubernetes-dashboard-747c4f7cf-md5m8   1/1     Running   0          56s

NAME                       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes-dashboard   ClusterIP   xxxx         <none>        443/TCP   56s

```
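
Since the dashboard service is ClusterIP-only, one way to reach it from a workstation is through kubectl proxy; a sketch, assuming the service name shown above:

```bash
kubectl proxy --port=8001
# then open in a browser:
# http://127.0.0.1:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
```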

 

* Heapster addon

 

```bash

export ADDON_URL="https://kairen.github.io/files/manual-v1.8/addon"

wget ${ADDON_URL}/kube-monitor.yml.conf -O kube-monitor.yml

kubectl apply -f kube-monitor.yml

kubectl -n kube-system get po,svc

NAME                                  READY   STATUS    RESTARTS   AGE
...
po/heapster-74fb5c8cdc-62xzc          4/4     Running   0          7m
po/influxdb-grafana-55bd7df44-nw4nc   2/2     Running   0          7m

NAME                      TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)             AGE
...
svc/heapster              ClusterIP   xxxx         <none>        80/TCP              7m
svc/monitoring-grafana    ClusterIP   xxxx         <none>        80/TCP              7m
svc/monitoring-influxdb   ClusterIP   xxxx         <none>        8083/TCP,8086/TCP   7m

 

```

 

* Test with nginx

 

```bash

kubectl run nginx --image=nginx --port=80
kubectl expose deploy nginx --port=80 --type=LoadBalancer --external-ip=172.16.35.12
kubectl get svc,po
NAME             TYPE           CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
svc/kubernetes   ClusterIP      xxxx         <none>        443/TCP        1h
svc/nginx        LoadBalancer   xxxx         xxxx          80:30344/TCP   22s

NAME                        READY   STATUS    RESTARTS   AGE
po/nginx-7cbc4b4d9c-7796l   1/1     Running   0          28s

```
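
The deployment can be exercised both through the NodePort (30344 in the output above) and through the external IP; for example, from any node:

```bash
curl -I http://192.168.0.121:30344   # via the NodePort on a node address
curl -I http://172.16.35.12          # via the service's external IP
```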
