Kubernetes 1.9 Offline Deployment: Operation Log

[root@master bin]# ssh-copy-id node-1
/usr/bin/ssh-copy-id: ERROR: No identities found
[root@master bin]# cd ssh-copy-id
bash: cd: ssh-copy-id: Not a directory
[root@master bin]# vim ssh-copy-id
[root@master bin]# cd ..
[root@master usr]# ls
bin  etc  games  include  lib  lib64  libexec  local  sbin  share  src  tmp
[root@master usr]# cd ..
[root@master /]# ls
1  bin  boot  dev  etc  home  lib  lib64  media  mnt  opt  proc  root  run  sbin  srv  sys  tmp  usr  var
[root@master /]# cd ../
[root@master /]# cd ~
[root@master ~]# pwd
/root
[root@master ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
3c:d9:49:9d:fa:ab:f8:24:b2:40:c2:c1:af:7c:4a:6a root@master
The key's randomart image is:
+--[ RSA 2048]----+
|                 |
| .         . .   |
|  o       . o    |
| . o   . + o     |
|  o o   S +      |
| . +     . .     |
|  + o . . . .    |
| E o . o +   .   |
|o .   . ..o..    |
+-----------------+
[root@master ~]# pwd
/root
[root@master ~]# ls
anaconda-ks.cfg  initial-setup-ks.cfg  k8s_images  software  swarm_wureka.log  tomcat.log
[root@master ~]# ssh-copy-id node-1
The authenticity of host 'node-1 (192.168.43.169)' can't be established.
ECDSA key fingerprint is 27:9f:fa:90:a3:13:44:d1:7b:29:c4:02:f9:ae:c2:36.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node-1's password:

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node-1'"
and check to make sure that only the key(s) you wanted were added.

[root@master ~]# ssh node-1
root@node-1's password:
Last login: Wed May  9 13:05:43 2018
[root@node-1 ~]# ^C
[root@node-1 ~]# exit
logout
Connection to node-1 closed.
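The ssh-copy-id failure above simply means no key pair existed yet; once ssh-keygen has run, the copy succeeds. The whole exchange can be done non-interactively and pushed to every node in one pass. A minimal sketch (node-1 is the only node in this log; any additional node names would be added to the list):

# Generate the key without prompts, then push it to each node
# (each ssh-copy-id asks once for that node's root password).
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
for node in node-1; do
    ssh-copy-id "root@${node}"
done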
[root@master ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:firewalld(1)
[root@master ~]# vim /etc/selinux/config
[root@master ~]# setenforce 0
[root@master ~]# echo "
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> " >> /etc/sysctl.conf
[root@master ~]# sysctl -p
net.ipv4.ip_forward = 1
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
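The content of the vim edit of /etc/selinux/config is not shown; it sets SELINUX=permissive (or disabled) so the setenforce 0 change survives a reboot. Note also that the two bridge-nf sysctls only exist while the br_netfilter kernel module is loaded; sysctl -p succeeded here, so the module was already present (Docker's bridge usually pulls it in), but on a fresh host it has to be loaded explicitly. A non-interactive sketch of both steps (the sed pattern assumes the stock CentOS 7 file):

sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
modprobe br_netfilter                                        # required for the bridge-nf sysctls
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf    # reload on boot
sysctl -p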
[root@master ~]# docker load < /root/k8s_images/docker_images/etcd-amd64_v3.1.10.tar
6a749002dd6a: Loading layer [==================================================>]  1.338MB/1.338MB
bbd07ea14872: Loading layer [==================================================>]  159.2MB/159.2MB
611a3394df5d: Loading layer [==================================================>]  32.44MB/32.44MB
Loaded image: gcr.io/google_containers/etcd-amd64:3.1.10
[root@master ~]# docker load </root/k8s_images/docker_images/flannel\:v0.9.1-amd64.tar
bash: /root/k8s_images/docker_images/flannel:v0.9.1-amd64.tar: No such file or directory
[root@master ~]# docker load </root/k8s_images/docker_images/flannel_v0.9.1-amd64.tar
5bef08742407: Loading layer [==================================================>]  4.221MB/4.221MB
f439636ab0f0: Loading layer [==================================================>]  6.797MB/6.797MB
91b6f6ead101: Loading layer [==================================================>]  4.414MB/4.414MB
fc3c053505e6: Loading layer [==================================================>]  34.49MB/34.49MB
032657ac7c4a: Loading layer [==================================================>]  2.225MB/2.225MB
fd713c7c81af: Loading layer [==================================================>]   5.12kB/5.12kB
Loaded image: quay.io/coreos/flannel:v0.9.1-amd64
[root@master ~]# docker load </root/k8s_images/docker_images/k8s-dns-dnsmasq-nanny-amd64_v1.14.7.tar
b87261cc1ccb: Loading layer [==================================================>]   2.56kB/2.56kB
ac66a5c581a8: Loading layer [==================================================>]    362kB/362kB
22f71f461ac8: Loading layer [==================================================>]  3.072kB/3.072kB
686a085da152: Loading layer [==================================================>]  36.63MB/36.63MB
Loaded image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
[root@master ~]# docker load </root/k8s_images/docker_images/k8s-dns-kube-dns-amd64_1.14.7.tar
cd69fdcd7591: Loading layer [==================================================>]  46.31MB/46.31MB
Loaded image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
[root@master ~]# docker load </root/k8s_images/docker_images/k8s-dns-sidecar-amd64_1.14.7.tar
bd94706d2c63: Loading layer [==================================================>]  38.07MB/38.07MB
Loaded image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
[root@master ~]# docker load </root/k8s_images/docker_images/kube-apiserver-amd64_v1.9.0.tar
0271b8eebde3: Loading layer [==================================================>]  1.338MB/1.338MB
9ccc9fba4253: Loading layer [==================================================>]  209.2MB/209.2MB
Loaded image: gcr.io/google_containers/kube-apiserver-amd64:v1.9.0
[root@master ~]# docker load </root/k8s_images/docker_images/kube-controller-manager-amd64_v1.9.0.tar
50a426d115f8: Loading layer [==================================================>]  136.6MB/136.6MB
Loaded image: gcr.io/google_containers/kube-controller-manager-amd64:v1.9.0
[root@master ~]# docker load </root/k8s_images/docker_images/kube-scheduler-amd64_v1.9.0.tar
f733b8f8af29: Loading layer [==================================================>]  61.57MB/61.57MB
Loaded image: gcr.io/google_containers/kube-scheduler-amd64:v1.9.0
[root@master ~]# docker load < /root/k8s_images/docker_images/kube-proxy-amd64_v1.9.0.tar
684c19bf2c27: Loading layer [==================================================>]   44.2MB/44.2MB
deb4ca39ea31: Loading layer [==================================================>]  3.358MB/3.358MB
9c44b0d51ed1: Loading layer [==================================================>]  63.38MB/63.38MB
Loaded image: gcr.io/google_containers/kube-proxy-amd64:v1.9.0
[root@master ~]# docker load </root/k8s_images/docker_images/pause-amd64_3.0.tar
5f70bf18a086: Loading layer [==================================================>]  1.024kB/1.024kB
41ff149e94f2: Loading layer [==================================================>]  748.5kB/748.5kB
Loaded image: gcr.io/google_containers/pause-amd64:3.0
[root@master ~]# docker load < /root/k8s_images/docker_images/kubernetes-dashboard_v1.8.1.tar
64c55db70c4a: Loading layer [==================================================>]  121.2MB/121.2MB
Loaded image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.1
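Loading a dozen tarballs one by one invites typos (the first flannel attempt above failed because the file name uses an underscore, not a colon). A loop over the image directory is equivalent; a sketch:

# load every saved image in the offline bundle
for tar in /root/k8s_images/docker_images/*.tar; do
    docker load < "$tar"
done
docker images    # verify the expected gcr.io/quay.io images are present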
[root@master ~]# cd /root
[root@master ~]# ls
anaconda-ks.cfg  initial-setup-ks.cfg  k8s_images  software  swarm_wureka.log  tomcat.log
[root@master ~]# cd k8s_images/
[root@master k8s_images]# ls
container-selinux-2.33-1.git86f33cd.el7.noarch.rpm          docker_images                      libtool-ltdl-2.4.2-22.el7_3.x86_64.rpm
device-mapper-1.02.140-8.el7.x86_64.rpm                     kubeadm-1.9.0-0.x86_64.rpm         libxml2-python-2.9.1-6.el7_2.3.x86_64.rpm
device-mapper-event-1.02.140-8.el7.x86_64.rpm               kubectl-1.9.0-0.x86_64.rpm         lsof-4.87-4.el7.x86_64.rpm
device-mapper-event-libs-1.02.140-8.el7.x86_64.rpm          kube-flannel.yml                   lvm2-2.02.171-8.el7.x86_64.rpm
device-mapper-libs-1.02.140-8.el7.x86_64.rpm                kubelet-1.9.0-0.x86_64.rpm         lvm2-libs-2.02.171-8.el7.x86_64.rpm
device-mapper-persistent-data-0.7.0-0.1.rc6.el7.x86_64.rpm  kubernetes-cni-0.6.0-0.x86_64.rpm  python-kitchen-1.1.1-5.el7.noarch.rpm
docker-ce-17.03.2.ce-1.el7.centos.x86_64.rpm                kubernetes-dashboard.yaml          socat-1.7.3.2-2.el7.x86_64.rpm
docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch.rpm        libseccomp-2.3.1-3.el7.x86_64.rpm  yum-utils-1.1.31-42.el7.noarch.rpm
[root@master k8s_images]# rpm -ivh socat-1.7.3.2-2.el7.x86_64.rpm
Preparing...                          ################################# [100%]
Updating / installing...
   1:socat-1.7.3.2-2.el7              ################################# [100%]
[root@master k8s_images]# rpm -ivh kubernetes-cni-0.6.0-0.x86_64.rpm  kubelet-1.9.0-0.x86_64.rpm  kubectl-1.9.0-0.x86_64.rpm
warning: kubernetes-cni-0.6.0-0.x86_64.rpm: Header V4 RSA/SHA1 Signature, key ID 3e1ba8d5: NOKEY
Preparing...                          ################################# [100%]
Updating / installing...
   1:kubelet-1.9.0-0                  ################################# [ 33%]
   2:kubernetes-cni-0.6.0-0           ################################# [ 67%]
   3:kubectl-1.9.0-0                  ################################# [100%]
[root@master k8s_images]# rpm -ivh kubectl-1.9.0-0.x86_64.rpm
warning: kubectl-1.9.0-0.x86_64.rpm: Header V4 RSA/SHA1 Signature, key ID 3e1ba8d5: NOKEY
Preparing...                          ################################# [100%]
    package kubectl-1.9.0-0.x86_64 is already installed
[root@master k8s_images]# rpm -ivh kubeadm-1.9.0-0.x86_64.rpm
warning: kubeadm-1.9.0-0.x86_64.rpm: Header V4 RSA/SHA1 Signature, key ID 3e1ba8d5: NOKEY
Preparing...                          ################################# [100%]
Updating / installing...
   1:kubeadm-1.9.0-0                  ################################# [100%]
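Installing the RPMs individually means working out the dependency order by hand (note that kubectl had to be skipped on the second attempt because it was already in). Letting yum resolve the order among the local files is equivalent and stays offline; a sketch (--disablerepo='*' blocks any network lookup, and already-installed packages such as docker-ce should simply be skipped):

cd /root/k8s_images
yum localinstall -y --disablerepo='*' ./*.rpm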
[root@master k8s_images]# systemctl enable kubelet && sudo systemctl start kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /etc/systemd/system/kubelet.service.
[root@master k8s_images]# kubeadm init --kubernetes-version=v1.9.0 --pod-network-cidr=10.244.0.0/16
[init] Using Kubernetes version: v1.9.0
[init] Using Authorization modes: [Node RBAC]
[preflight] Running pre-flight checks.
    [WARNING SystemVerification]: docker version is greater than the most recently validated version. Docker version: 17.04.0-ce. Max validated version: 17.03
    [WARNING FileExisting-crictl]: crictl not found in system path
[preflight] Some fatal errors occurred:
    [ERROR Swap]: running with swap on is not supported. Please disable swap
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
[root@master k8s_images]# kubeadm reset
[preflight] Running pre-flight checks.
[reset] Stopping the kubelet service.
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Removing kubernetes-managed containers.
[reset] No etcd manifest found in "/etc/kubernetes/manifests/etcd.yaml". Assuming external etcd.
[reset] Deleting contents of stateful directories: [/var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/run/kubernetes]
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
[root@master k8s_images]# cd /etc/systemd/system/
[root@master system]# ls
basic.target.wants                           default.target.wants                                     printer.target.wants
bluetooth.target.wants                       dev-virtio\x2dports-org.qemu.guest_agent.0.device.wants  remote-fs.target.wants
dbus-org.bluez.service                       display-manager.service                                  sockets.target.wants
dbus-org.freedesktop.Avahi.service           getty.target.wants                                       spice-vdagentd.target.wants
dbus-org.freedesktop.ModemManager1.service   graphical.target.wants                                   sssd.service.d
dbus-org.freedesktop.NetworkManager.service  kubelet.service                                          sysinit.target.wants
dbus-org.freedesktop.nm-dispatcher.service   kubelet.service.d                                        system-update.target.wants
default.target                               multi-user.target.wants
[root@master system]# cd kubelet.service.d/
[root@master kubelet.service.d]# ls
10-kubeadm.conf
[root@master kubelet.service.d]# vim 10-kubeadm.conf
[root@master kubelet.service.d]# swapoff -a
[root@master kubelet.service.d]# systemctl daemon-reload
[root@master kubelet.service.d]# systemctl restart kubelet
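These four commands are the fix for the fatal Swap pre-flight error above: kubeadm 1.9 refuses to run with swap enabled. The content of the vim edit is not shown; the usual change at this step (an assumption, not confirmed by the log) is a kubelet override in 10-kubeadm.conf, paired with disabling swap permanently:

# /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (added line)
Environment="KUBELET_EXTRA_ARGS=--fail-swap-on=false"

# keep swap off across reboots by commenting its /etc/fstab entry
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab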
[root@master kubelet.service.d]# kubeadm init --kubernetes-version=v1.9.0 --pod-network-cidr=10.244.0.0/16
[init] Using Kubernetes version: v1.9.0
[init] Using Authorization modes: [Node RBAC]
[preflight] Running pre-flight checks.
    [WARNING SystemVerification]: docker version is greater than the most recently validated version. Docker version: 17.04.0-ce. Max validated version: 17.03
    [WARNING FileExisting-crictl]: crictl not found in system path
[certificates] Generated ca certificate and key.
[certificates] Generated apiserver certificate and key.
[certificates] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.43.168]
[certificates] Generated apiserver-kubelet-client certificate and key.
[certificates] Generated sa key and public key.
[certificates] Generated front-proxy-ca certificate and key.
[certificates] Generated front-proxy-client certificate and key.
[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "scheduler.conf"
[controlplane] Wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[controlplane] Wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[controlplane] Wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/manifests/etcd.yaml"
[init] Waiting for the kubelet to boot up the control plane as Static Pods from directory "/etc/kubernetes/manifests".
[init] This might take a minute or longer if the control plane images have to be pulled.
[apiclient] All control plane components are healthy after 72.004005 seconds
[uploadconfig] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[markmaster] Will mark node master as master by adding a label and a taint
[markmaster] Master master tainted and labelled with key/value: node-role.kubernetes.io/master=""
[bootstraptoken] Using token: c19991.0acc988fb11987d5
[bootstraptoken] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: kube-dns
[addons] Applied essential addon: kube-proxy

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join --token c19991.0acc988fb11987d5 192.168.43.168:6443 --discovery-token-ca-cert-hash sha256:88f54f1f3a0a86ae52c91db07751fab97d1a7d994efcc8abf6f0f126d970756f

(A fresh token created later, e.g. with kubeadm token create, yields an equivalent join command for the same cluster:)

kubeadm join --token fadc19.112aa8c9e9156ac1 192.168.43.168:6443 --discovery-token-ca-cert-hash sha256:88f54f1f3a0a86ae52c91db07751fab97d1a7d994efcc8abf6f0f126d970756f

[root@master kubelet.service.d]# export KUBECONFIG=/etc/kubernetes/admin.conf
[root@master kubelet.service.d]# vim /etc/profile
[root@master kubelet.service.d]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
[root@master kubelet.service.d]# vim /etc/profile
[root@master kubelet.service.d]# vim ~/.bash_profile
[root@master kubelet.service.d]# source ~/.bash_profile
[root@master kubelet.service.d]# kubectl version
Client Version: version.Info{Major:"1", Minor:"9", GitVersion:"v1.9.0", GitCommit:"925c127ec6b946659ad0fd596fa959be43f0cc05", GitTreeState:"clean", BuildDate:"2017-12-15T21:07:38Z", GoVersion:"go1.9.2", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"9", GitVersion:"v1.9.0", GitCommit:"925c127ec6b946659ad0fd596fa959be43f0cc05", GitTreeState:"clean", BuildDate:"2017-12-15T20:55:30Z", GoVersion:"go1.9.2", Compiler:"gc", Platform:"linux/amd64"}
[root@master kubelet.service.d]# cd /root/k8s_images/
[root@master k8s_images]# ls
container-selinux-2.33-1.git86f33cd.el7.noarch.rpm          docker_images                      libtool-ltdl-2.4.2-22.el7_3.x86_64.rpm
device-mapper-1.02.140-8.el7.x86_64.rpm                     kubeadm-1.9.0-0.x86_64.rpm         libxml2-python-2.9.1-6.el7_2.3.x86_64.rpm
device-mapper-event-1.02.140-8.el7.x86_64.rpm               kubectl-1.9.0-0.x86_64.rpm         lsof-4.87-4.el7.x86_64.rpm
device-mapper-event-libs-1.02.140-8.el7.x86_64.rpm          kube-flannel.yml                   lvm2-2.02.171-8.el7.x86_64.rpm
device-mapper-libs-1.02.140-8.el7.x86_64.rpm                kubelet-1.9.0-0.x86_64.rpm         lvm2-libs-2.02.171-8.el7.x86_64.rpm
device-mapper-persistent-data-0.7.0-0.1.rc6.el7.x86_64.rpm  kubernetes-cni-0.6.0-0.x86_64.rpm  python-kitchen-1.1.1-5.el7.noarch.rpm
docker-ce-17.03.2.ce-1.el7.centos.x86_64.rpm                kubernetes-dashboard.yaml          socat-1.7.3.2-2.el7.x86_64.rpm
docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch.rpm        libseccomp-2.3.1-3.el7.x86_64.rpm  yum-utils-1.1.31-42.el7.noarch.rpm
[root@master k8s_images]# kubectl create -f kube-flannel.yml
clusterrole "flannel" created
clusterrolebinding "flannel" created
serviceaccount "flannel" created
configmap "kube-flannel-cfg" created
daemonset "kube-flannel-ds" created
[root@master k8s_images]# kubectl get nodes
NAME      STATUS    ROLES     AGE       VERSION
master    Ready     master    12m       v1.9.0
[root@master k8s_images]# kubectl get pods --all-namespaces
NAMESPACE     NAME                             READY     STATUS    RESTARTS   AGE
kube-system   etcd-master                      1/1       Running   1          3m
kube-system   kube-apiserver-master            1/1       Running   0          3m
kube-system   kube-controller-manager-master   1/1       Running   1          3m
kube-system   kube-dns-6f4fd4bdf-nvch9         3/3       Running   0          14m
kube-system   kube-flannel-ds-v5fg6            1/1       Running   0          3m
kube-system   kube-proxy-gbww9                 1/1       Running   0          14m
kube-system   kube-scheduler-master            1/1       Running   1          3m
[root@master k8s_images]#
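Only the master is registered at this point. Joining node-1 happens off-screen: it needs the same preparation performed above (load the Docker images, install the RPMs, swapoff -a), followed by the join command printed by kubeadm init. A sketch of the node-1 session, reusing the token and CA hash from the init output:

[root@node-1 ~]# swapoff -a
[root@node-1 ~]# kubeadm join --token c19991.0acc988fb11987d5 192.168.43.168:6443 --discovery-token-ca-cert-hash sha256:88f54f1f3a0a86ae52c91db07751fab97d1a7d994efcc8abf6f0f126d970756f

The dashboard is presumably deployed the same way, from the bundled manifest (kubectl create -f /root/k8s_images/kubernetes-dashboard.yaml); the extra kube-flannel-ds/kube-proxy pods and the kubernetes-dashboard pod in the listing further below confirm both steps. The next session resumes after a reboot of the master: swap has to be switched off again and the kubelet started, and the "connection refused" errors in its log stop once the static control-plane pods come back up.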
[root@master xulei]# swapoff -a
[root@master xulei]# export KUBECONFIG=/etc/kubernetes/admin.conf
[root@master xulei]# systemctl start kubelet
[root@master xulei]# systemctl status kubelet
● kubelet.service - kubelet: The Kubernetes Node Agent
   Loaded: loaded (/etc/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /etc/systemd/system/kubelet.service.d
           └─10-kubeadm.conf
   Active: active (running) since Wed 2018-05-09 15:23:13 CST; 13s ago
     Docs: http://kubernetes.io/docs/
 Main PID: 6547 (kubelet)
   Memory: 44.5M
   CGroup: /system.slice/kubelet.service
           └─6547 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --pod-m...

May 09 15:23:24 master kubelet[6547]: E0509 15:23:23.522505    6547 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/kubelet.go:465: Faile... refused
May 09 15:23:24 master kubelet[6547]: E0509 15:23:23.522805    6547 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:4... refused
May 09 15:23:24 master kubelet[6547]: E0509 15:23:23.522936    6547 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/kubelet.go:474: Faile... refused
May 09 15:23:24 master kubelet[6547]: E0509 15:23:24.608633    6547 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/kubelet.go:465: Faile... refused
May 09 15:23:24 master kubelet[6547]: E0509 15:23:24.609410    6547 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:4... refused
May 09 15:23:24 master kubelet[6547]: E0509 15:23:24.623509    6547 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/kubelet.go:474: Faile... refused
May 09 15:23:25 master kubelet[6547]: E0509 15:23:25.678045    6547 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/kubelet.go:465: Faile... refused
May 09 15:23:25 master kubelet[6547]: E0509 15:23:25.678197    6547 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/kubelet.go:474: Faile... refused
May 09 15:23:25 master kubelet[6547]: E0509 15:23:25.678313    6547 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:4... refused
May 09 15:23:26 master kubelet[6547]: W0509 15:23:26.007794    6547 docker_container.go:193] Deleted previously existing symlink file: "/...r_1.log"
Hint: Some lines were ellipsized, use -l to show in full.
[root@master xulei]#
[root@master xulei]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                   READY     STATUS    RESTARTS   AGE
kube-system   etcd-master                            1/1       Running   2          1h
kube-system   kube-apiserver-master                  1/1       Running   1          1h
kube-system   kube-controller-manager-master         1/1       Running   3          1h
kube-system   kube-dns-6f4fd4bdf-nvch9               3/3       Running   6          1h
kube-system   kube-flannel-ds-s28lb                  1/1       Running   0          46m
kube-system   kube-flannel-ds-v5fg6                  1/1       Running   2          1h
kube-system   kube-proxy-gbww9                       1/1       Running   1          1h
kube-system   kube-proxy-w5p8x                       1/1       Running   0          46m
kube-system   kube-scheduler-master                  1/1       Running   3          1h
kube-system   kubernetes-dashboard-58f5cb49c-8sfdb   1/1       Running   0          43m
[root@master xulei]# netstat -lntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      6801/etcd           
tcp        0      0 127.0.0.1:10251         0.0.0.0:*               LISTEN      6860/kube-scheduler
tcp        0      0 127.0.0.1:2380          0.0.0.0:*               LISTEN      6801/etcd           
tcp        0      0 127.0.0.1:10252         0.0.0.0:*               LISTEN      6833/kube-controlle
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      1/systemd           
tcp        0      0 192.168.122.1:53        0.0.0.0:*               LISTEN      2386/dnsmasq        
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      1008/sshd           
tcp        0      0 127.0.0.1:631           0.0.0.0:*               LISTEN      1006/cupsd          
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      2204/master         
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      6547/kubelet        
tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      7067/kube-proxy     
tcp6       0      0 :::6443                 :::*                    LISTEN      6793/kube-apiserver
tcp6       0      0 :::10255                :::*                    LISTEN      6547/kubelet        
tcp6       0      0 :::111                  :::*                    LISTEN      1/systemd           
tcp6       0      0 :::10256                :::*                    LISTEN      7067/kube-proxy     
tcp6       0      0 :::22                   :::*                    LISTEN      1008/sshd           
tcp6       0      0 ::1:631                 :::*                    LISTEN      1006/cupsd          
tcp6       0      0 ::1:25                  :::*                    LISTEN      2204/master         
tcp6       0      0 :::32666                :::*                    LISTEN      7067/kube-proxy     
tcp6       0      0 :::8090                 :::*                    LISTEN      2483/java           
tcp6       0      0 :::36894                :::*                    LISTEN      2483/java           
tcp6       0      0 :::2375                 :::*                    LISTEN      1010/dockerd        
tcp6       0      0 :::10250                :::*                    LISTEN      6547/kubelet        
[root@master xulei]# kubectl get services kubernetes-dashboard -n kube-system
NAME                   TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.108.90.108   <none>        443:32666/TCP   54m
[root@master xulei]# ^C
[root@master xulei]#
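Because the service type is NodePort, the dashboard can be opened directly in a browser against any node; with the master's address from this log that is:

https://192.168.43.168:32666

The certificate is self-signed, so the browser warning has to be accepted.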

To allow username/password login on the dashboard, the kube-apiserver is given a static basic-auth file:

basic_auth_file=/etc/kubernetes/pki/basic_auth_file
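A sketch of wiring that up (the credentials below are assumptions; pick your own). Each line of the file is password,user,uid, and the flag goes into the kube-apiserver static pod manifest, which the kubelet reloads automatically:

# create the credentials file: password,user,uid
echo 'admin,admin,2' > /etc/kubernetes/pki/basic_auth_file

# add this argument to /etc/kubernetes/manifests/kube-apiserver.yaml,
# next to the other "- --..." flags:
#     - --basic-auth-file=/etc/kubernetes/pki/basic_auth_file

The cluster-admin binding created below is what actually lets this "admin" user see anything in the dashboard.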
[root@master xulei]# kubectl get clusterrole/cluster-admin -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  creationTimestamp: 2018-05-09T05:40:01Z
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: cluster-admin
  resourceVersion: "40"
  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterroles/cluster-admin
  uid: 6abd9540-534b-11e8-8db3-000c29caafed
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'
- nonResourceURLs:
  - '*'
  verbs:
  - '*'
[root@master xulei]# kubectl create clusterrolebinding login-on-dashboard-with-cluster-admin --clusterrole=cluster-admin --user=admin
clusterrolebinding "login-on-dashboard-with-cluster-admin" created
[root@master xulei]# kubectl get clusterrolebinding/login-on-dashboard-with-cluster-admin -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: 2018-05-09T08:22:49Z
  name: login-on-dashboard-with-cluster-admin
  resourceVersion: "11369"
  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/login-on-dashboard-with-cluster-admin
  uid: 28eb3a5b-5362-11e8-815b-000c29caafed
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: admin
[root@master xulei]#
[root@master xulei]# kubectl get services kubernetes-dashboard -n kube-system
NAME                   TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.108.90.108   <none>        443:32666/TCP   2h
[root@master xulei]# kubectl get deployment kubernetes-dashboard  -n kube-system
NAME                   DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
kubernetes-dashboard   1         1         1            0           2h
[root@master xulei]# kubectl get pods  -n kube-system | grep dashboard
kubernetes-dashboard-58f5cb49c-8sfdb   1/1       Running   0          2h
[root@master xulei]# kubectl cluster-info
Kubernetes master is running at https://192.168.43.168:6443
KubeDNS is running at https://192.168.43.168:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@master xulei]#
kubectl proxy --address='10.501.101.41' --port=8086 --accept-hosts='^*$'
Starting to serve on 10.501.101.41:8086
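With the proxy running, the dashboard is served over plain HTTP through the apiserver proxy path; a sketch (substitute the real proxy address: the one logged above, 10.501.101.41, is not a valid IPv4 address and is kept as recorded):

http://<proxy-address>:8086/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/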