k8s2

https://github.com/kubernetes 项目仓库
自动扩缩容
自动完成容器迁移
k8s是一个容器编排系统
运维:发布,变更,故障的处理
发布:控制器 控制器坏了自主修复
变更:控制器来修复
故障的处理:

[root@hadoop ~]# cd /usr/lib/modules/3.10.0-693.el7.x86_64/kernel/net/netfilter/ipvs/
[root@hadoop ipvs]# vim /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
# Load every IPVS kernel module shipped with the running kernel so that
# kube-proxy can later run in IPVS mode.
# Intended location: /etc/sysconfig/modules/ipvs.modules
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for mod_file in "$ipvs_mods_dir"/*; do
        [ -e "$mod_file" ] || continue   # directory empty: glob did not match
        mod="${mod_file##*/}"            # strip leading directory
        mod="${mod%%.*}"                 # strip .ko / .ko.xz extension
        # Only load modules that modinfo can resolve for this kernel.
        if /sbin/modinfo -F filename "$mod" &> /dev/null; then
                /sbin/modprobe "$mod"
        fi
done
[root@hadoop yum.repos.d]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@hadoop yum.repos.d]# scp  docker-ce.repo nova1:/etc/yum.repos.d/			4台
[root@hadoop ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@hadoop ~]# yum -y install container-selinux
					4台
[root@hadoop ~]# yum -y install docker-ce
					4台
[root@hadoop ~]# vim /usr/lib/systemd/system/docker.service
Environment="HTTPS_PROXY=http://www.ik8s.io:10070"
					14行
Environment="NO_PROXY=127.0.0.1/8,192.168.8.254/24"
					15行
ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
					16行
[root@nova1 ~]# which iptables
/usr/sbin/iptables
[root@hadoop ~]# systemctl daemon-reload
[root@hadoop ~]# systemctl restart docker
[root@hadoop ~]# docker info
[root@hadoop ~]# iptables -vnL
[root@hadoop ~]# sysctl -a | grep bridge
[root@hadoop ~]# vim /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@hadoop ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@hadoop ~]# scp /etc/sysctl.d/k8s.conf nova1:/etc/sysctl.d/			3台
[root@hadoop ~]# scp /usr/lib/systemd/system/docker.service nova1:/usr/lib/systemd/system/docker.service			3台
[root@nova1 ~]# systemctl daemon-reload && systemctl restart docker			3台
[root@nova1 ~]# docker info
[root@hadoop ~]# cd /etc/yum.repos.d/
[root@hadoop yum.repos.d]# vim k8s.repo
[k8s]
name=Kubernetes Repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
        https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
[root@hadoop ~]# yum repolist
[root@hadoop ~]# yum list all | grep "^kube"
[root@hadoop ~]# yum -y install kubeadm kubectl kubelet
[root@hadoop ~]# rpm -ql kubelet
/etc/kubernetes/manifests
/etc/sysconfig/kubelet
/usr/bin/kubelet
/usr/lib/systemd/system/kubelet.service
[root@hadoop ~]# vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false"
[root@hadoop ~]# kubeadm -h
[root@hadoop ~]# kubeadm config -h
[root@hadoop ~]# kubeadm config print -h
[root@hadoop ~]# kubeadm config print init-defaults
flannel: 10.244.0.0/16
calico:  192.168.0.0/16
[root@hadoop ~]# kubeadm init --kubernetes-version="v1.14.1" --pod-network-cidr="10.244.0.0/16" --dry-run --ignore-preflight-errors=Swap
[root@hadoop ~]# kubeadm config images list
				列镜像
[root@hadoop ~]# docker image list
[root@hadoop ~]# kubeadm config images pull
[root@hadoop ~]# docker image list
[root@hadoop ~]# kubeadm init --kubernetes-version="v1.14.1" --pod-network-cidr="10.244.0.0/16" --ignore-preflight-errors=Swap
[root@hadoop ~]# mkdir .kube
[root@hadoop ~]# cp /etc/kubernetes/admin.conf .kube/config
[root@hadoop ~]# kubectl get nodes
https://github.com/coreos	浏览器
[root@hadoop ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[root@hadoop ~]# kubectl get pods -n kube-system
[root@hadoop ~]# vim kubeadm-init.txt
kubeadm join 192.168.8.10:6443 --token pzbs7d.u6g0qqvj3ro418an --discovery-token-ca-cert-hash sha256:285b81a66bcd718da46fb061b1e1948ca3666bfdadc78f0a0c9a407b48e64c69
[root@hadoop ~]# scp /etc/yum.repos.d/k8s.repo   nova1:/etc/yum.repos.d/			3台
[root@nova1 ~]# yum -y install kubeadm kubelet
						3台
[root@hadoop ~]# scp  /etc/sysconfig/kubelet nova1:/etc/sysconfig/				3台
[root@nova1 ~]# kubeadm join 192.168.8.10:6443 --token pzbs7d.u6g0qqvj3ro418an --discovery-token-ca-cert-hash sha256:285b81a66bcd718da46fb061b1e1948ca3666bfdadc78f0a0c9a407b48e64c69 --ignore-preflight-errors=Swap
[root@nova1 ~]# docker image list
[root@hadoop ~]# kubectl get nodes
k8s=6443
[root@hadoop ~]# kubectl config view
[root@hadoop ~]# kubectl get nodes
[root@hadoop ~]# kubectl get pods
[root@hadoop ~]# kubectl get pods -n kube-system
[root@hadoop ~]# kubectl get pods -n kube-system -o wide
[root@hadoop ~]# kubectl create namespace develop
[root@hadoop ~]# kubectl create namespace testing
[root@hadoop ~]# kubectl create namespace prod
[root@hadoop ~]# kubectl get ns
[root@hadoop ~]# kubectl delete namespaces develop
[root@hadoop ~]# kubectl get ns
[root@hadoop ~]# kubectl delete ns/testing ns/prod
[root@hadoop ~]# kubectl get ns/default -o json
[root@hadoop ~]# kubectl create deploy ngx-dep --image=nginx:1.14-alpine
[root@hadoop ~]# kubectl get all
[root@hadoop ~]# kubectl get pods -o wide
[root@hadoop ~]# curl 192.168.8.11
[root@hadoop ~]# kubectl delete pods ngx-dep-68699d7cc4-jjtfz
[root@hadoop ~]# curl 192.168.8.12
[root@hadoop ~]# kubectl create service clusterip -h
[root@hadoop ~]# kubectl create service clusterip ngx-svc --tcp=80:80
[root@hadoop ~]# kubectl get svc
[root@hadoop ~]# kubectl get svc/ngx-svc -o yaml
[root@hadoop ~]# kubectl delete svc/ngx-svc
[root@hadoop ~]# kubectl create service clusterip ngx-dep --tcp=80:80
[root@hadoop ~]# kubectl get svc/ngx-dep -o yaml
[root@hadoop ~]# curl 192.168.8.11
[root@hadoop ~]# kubectl get pods
[root@hadoop ~]# kubectl delete pods ngx-dep-68699d7cc4-tr9jb
[root@hadoop ~]# kubectl get pods
[root@hadoop ~]# kubectl get pods -o wide
[root@hadoop ~]# kubectl describe svc/ngx-dep
[root@hadoop ~]# curl 192.168.8.11
[root@hadoop ~]# curl ngx-dep
[root@hadoop ~]# kubectl get pods -n kube-system
[root@hadoop ~]# kubectl get svc -n kube-system

负载均衡

[root@hadoop ~]# kubectl describe svc/myapp
[root@hadoop ~]# curl myapp.default.svc.cluster.local/hostname.html
[root@hadoop ~]# kubectl describe svc/myapp
[root@hadoop ~]# kubectl create service nodep
[root@hadoop ~]# iptables -t nat -vnL
[root@hadoop ~]# ps aux
[root@hadoop ~]# kubectl api-versions
[root@hadoop ~]# kubectl get pods
[root@hadoop ~]# kubectl get deploy myapp
[root@hadoop ~]# kubectl get deploy myapp -o yaml
[root@hadoop ~]# mkdir -p manifests/basic
[root@hadoop basic]# vim develop-ns.yaml
[root@hadoop basic]# kubectl get ns default -o yaml
[root@hadoop basic]# kubectl get ns default -o yaml --export
[root@hadoop basic]# vim develop-ns.yaml
# develop-ns.yaml — creates the "develop" namespace (applied with kubectl create -f).
apiVersion: v1
kind: Namespace
metadata:
  name: develop
[root@hadoop basic]# kubectl get ns
[root@hadoop basic]# kubectl create -f develop-ns.yaml
[root@hadoop basic]# kubectl get ns
[root@hadoop basic]# cp develop-ns.yaml  prod-ns.yaml
[root@hadoop basic]# vim prod-ns.yaml
# prod-ns.yaml — creates the "prod" namespace (copied from develop-ns.yaml).
apiVersion: v1
kind: Namespace
metadata:
  name: prod
[root@hadoop basic]# kubectl create -f prod-ns.yaml
[root@hadoop basic]# kubectl get ns
https://cn.bing.com/			浏览器
kubernetes reference			搜索
https://kubernetes.io/docs/reference/浏览器
https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/			浏览器
https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#namespace-v1-core
						浏览器
https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#pod-v1-core	浏览器
[root@hadoop ~]# kubectl get pods
[root@hadoop ~]# kubectl get pods myapp-6c5bfd4dff-gsnhz -o yaml
[root@hadoop ~]# kubectl get pods myapp-6c5bfd4dff-gsnhz -o yaml --export
[root@hadoop ~]# kubectl get pods myapp-6c5bfd4dff-gsnhz -o yaml --export > manifests/basic/pod-demo.yaml
[root@hadoop ~]# vim manifests/basic/pod-demo.yaml
# pod-demo.yaml — single-container Pod derived from `kubectl get pods ... --export`.
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  name: pod-demo          # line 5: renamed from the exported pod name
  namespace: develop      # line 6: target namespace added by hand
spec:
  containers:
  - image: ikubernetes/myapp:v1
    imagePullPolicy: IfNotPresent
    name: myapp
    resources: {}
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  priority: 0
  # Fixed: the field is restartPolicy (pods.spec.restartPolicy), not "restarName".
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
[root@hadoop ~]# kubectl apply -f manifests/basic/pod-demo.yaml
[root@hadoop ~]# kubectl get pods
[root@hadoop ~]# kubectl get pods -n develop
[root@hadoop ~]# kubectl explain pods
[root@hadoop ~]# kubectl explain pods.metadata
[root@hadoop ~]# kubectl explain Pods.spec.containers.env.valueFrom
[root@hadoop ~]# kubectl explain pods.spec.containers.command
[root@hadoop ~]# vim manifests/basic/pod-demo-2.yaml
# pod-demo-2.yaml — two-container Pod in the "prod" namespace.
# Fixed: children of metadata: and spec: must be indented; the transcript had
# them flush-left, which is invalid YAML (compare the corrected copy used later).
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: prod
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  - name: bbox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    # Keep the sidecar alive for a day so it can be exec'd into.
    command: ["/bin/sh","-c","sleep 86400"]
[root@hadoop ~]# kubectl apply -f manifests/basic/pod-demo-2.yaml
[root@hadoop ~]# kubectl get pods -n prod
[root@hadoop ~]# kubectl get pods -n prod -o yaml
[root@hadoop ~]# kubectl exec pod-demo -c bbox -n prod -it -- /bin/sh
/ # ifconfig
/ # netstat -tnl
/ # ps aux
/ # curl 127.0.0.1
/ # wget -O - -q 127.0.0.1
[root@hadoop ~]# kubectl logs pod-demo -n prod -c myapp
/ # wget -O - -q 127.0.0.1/hostname.html
[root@hadoop ~]# kubectl logs pod-demo -n prod -c myapp
[root@hadoop ~]# kubectl logs myapp-6c5bfd4dff-gsnhz
[root@hadoop ~]# kubectl explain pods.spec
[root@hadoop ~]# vim manifests/basic/host-pod.yaml
# host-pod.yaml — Pod that shares the node's network namespace.
apiVersion: v1
kind: Pod
metadata:
  name: mypod
  namespace: default
spec:
  # Fixed: hostNetwork is a Pod-level field (pods.spec.hostNetwork); the
  # transcript had it nested under the container entry, where it is invalid.
  hostNetwork: true
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
[root@hadoop ~]# kubectl apply -f manifests/basic/host-pod.yaml
[root@hadoop ~]# kubectl get pods -o wide
[root@hadoop ~]# kubectl explain pods.spec.containers.ports
[root@hadoop ~]# kubectl delete -f host-pod.yaml
[root@hadoop ~]# vim manifests/basic/host-pod.yaml
..........................................................
    image: ikubernetes/myapp:v1
    ports:
    - protocol: TCP
      containerPort: 80
      name: http
      hostPort: 8080
[root@hadoop ~]# kubectl apply -f manifests/basic/host-pod.yaml
[root@hadoop ~]# kubectl get pods -o wide
标记
[root@hadoop ~]# kubectl get pods --show-labels
[root@hadoop ~]# kubectl get pods -n prod --show-labels
[root@hadoop ~]# vim manifests/basic/pod-demo-2.yaml
# pod-demo-2.yaml, revised: labels added so the Pod can be matched with
# `kubectl get pods -l ...` selectors.
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: prod
  labels:
    app: pod-demo
    rel: stable
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  - name: bbox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    # Keep the sidecar alive so it can be exec'd into.
    command: ["/bin/sh","-c","sleep 86400"]
[root@hadoop ~]# kubectl apply -f pod-demo-2.yaml
[root@hadoop ~]# kubectl get pods -n prod --show-labels
[root@hadoop ~]# kubectl label -h
[root@hadoop ~]# kubectl label pods pod-demo -n prod tier=frontend
[root@hadoop ~]# kubectl get pods -n prod --show-labels
[root@hadoop ~]# kubectl label pods pod-demo -n prod rel-
[root@hadoop ~]# kubectl get pods -n prod --show-labels
[root@hadoop ~]# kubectl get pods --show-labels -l app!=myapp
[root@hadoop ~]# kubectl get pods --show-labels -l "app in (myapp,ngx-dep)"
[root@hadoop ~]# kubectl get pods --show-labels -l "app in (myapp,ngx-dep)" -L app
[root@hadoop ~]# kubectl get pods -l "app in (myapp,ngx-dep)" -L app
[root@hadoop ~]# kubectl get pods -l "app notin (myapp,ngx-dep)" -L app
[root@hadoop ~]# kubectl get pods -l "app notin (myapp,ngx-dep)" --show-labels
[root@hadoop ~]# kubectl get pods -l "app" --show-labels
[root@hadoop ~]# kubectl get pods -l '!app' --show-labels
[root@hadoop ~]# kubectl annotate -h
[root@hadoop ~]# kubectl explain pods.metadata
[root@hadoop ~]# vim manifests/basic/pod-demo-2.yaml
# pod-demo-2.yaml, revised again: an annotation is added. Unlike labels,
# annotations are not usable in selectors; they carry free-form metadata.
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: prod
  labels:
    app: pod-demo
    rel: stable
  annotations:
    ik8s.io/project: hello
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  - name: bbox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    # Keep the sidecar alive so it can be exec'd into.
    command: ["/bin/sh","-c","sleep 86400"]
[root@hadoop ~]# kubectl apply -f pod-demo-2.yaml
[root@hadoop ~]# kubectl get -h | grep annota
[root@hadoop ~]# kubectl get pods -n prod
[root@hadoop ~]# kubectl get pods -o yaml
[root@hadoop ~]# kubectl get pods pod-demo -n prod -o yaml
[root@hadoop ~]# kubectl explain pods.spec.containers.lifecycle
[root@hadoop ~]# kubectl explain pods.spec.containers.lifecycle.postStart.tcpSocket
[root@hadoop ~]# kubectl explain pods.spec.containers.livenessProbe
https://github.com/iKubernetes		浏览器
[root@hadoop ~]# git clone https://github.com/iKubernetes/Kubernetes_Advanced_Practical.git
[root@hadoop ~]# cd Kubernetes_Advanced_Practical/
[root@hadoop Kubernetes_Advanced_Practical]# ls
chapter10  chapter13  chapter2  chapter5  chapter8    README.md
chapter11  chapter14  chapter3  chapter6  chapter9
chapter12  chapter15  chapter4  chapter7  conf_files
[root@hadoop Kubernetes_Advanced_Practical]# cd chapter4/
[root@hadoop ~]# kubectl explain pods.spec.containers
[root@hadoop ~]# kubectl explain pods.spec.containers.livenessProbe
[root@hadoop chapter4]# vim liveness-exec.yaml
    - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
[root@hadoop chapter4]# kubectl apply -f liveness-exec.yaml
[root@hadoop chapter4]# kubectl get pods
[root@hadoop chapter4]# kubectl get pods -o wide
[root@hadoop chapter4]# kubectl get pods -w
[root@hadoop chapter4]# kubectl describe pods liveness-exec 
[root@hadoop chapter4]# kubectl get pods
[root@hadoop chapter4]# vim liveness-http.yaml
........................................................
      periodSeconds: 2
      failureThreshold: 2
      initialDelaySeconds: 3
[root@hadoop chapter4]# kubectl apply -f liveness-http.yaml
[root@hadoop chapter4]# kubectl get pods -w
[root@hadoop chapter4]# kubectl get pods
[root@hadoop chapter4]# kubectl describe pods liveness-http
[root@hadoop chapter4]# kubectl exec -it liveness-http -- /bin/sh
/ # cd /usr/share/nginx/html
[root@hadoop chapter4]# kubectl apply -f liveness-http.yaml
[root@hadoop chapter4]# vim readiness-exec.yaml
[root@hadoop chapter4]# kubectl apply -f readiness-exec.yaml
[root@hadoop chapter4]# kubectl get pods -w
[root@hadoop chapter4]# kubectl exec readiness-exec -- rm -f /tmp/ready
[root@hadoop chapter4]# kubectl get pods -w
[root@hadoop chapter4]# kubectl exec readiness-exec -- touch /tmp/ready
[root@hadoop chapter4]# kubectl get pods -w
[root@hadoop chapter4]# kubectl explain pods.spec
[root@hadoop chapter4]# kubectl explain pods.spec.securityContext
[root@hadoop chapter4]# kubectl explain pods.spec.containers.securityContext.capabilities
[root@hadoop ~]# kubectl explain pods.spec.containers
[root@hadoop ~]# kubectl explain pods.spec.containers.resources
[root@hadoop chapter4]# vim stress-pod.yaml
# stress-pod.yaml — demonstrates resource requests vs. limits with stress-ng.
apiVersion: v1
kind: Pod
metadata:
  name: stress-pod
spec:
  containers:
  - name: stress
    image: ikubernetes/stress-ng
    # One CPU worker and one VM worker; print a metrics summary on exit.
    # NOTE(review): "-c 1" is passed as a single argv entry (flag and value
    # together) — confirm the image's stress-ng accepts this form.
    command: ["/usr/bin/stress-ng", "-c 1", "-m 1", "--metrics-brief"]
    resources:
      requests:
        memory: "128Mi"
        cpu: "200m"
      limits:
        memory: "512Mi"
        cpu: "400m"
[root@hadoop chapter4]# kubectl apply -f stress-pod.yaml
[root@hadoop chapter4]# kubectl get pods
[root@hadoop chapter4]# vim memleak-pod.yaml
# memleak-pod.yaml — a deliberately leaking container used to demonstrate
# the OOM-kill behavior when a container exceeds its memory limit.
apiVersion: v1
kind: Pod
metadata:
  name: memleak-pod
spec:
  containers:
  - name: simmemleak
    image: saadali/simmemleak
    resources:
      # NOTE(review): requests equal to limits should place the Pod in the
      # Guaranteed QoS class — verify with kubectl describe.
      requests:
        memory: "64Mi"
        cpu: "1"
      limits:
        memory: "64Mi"
        cpu: "1"
[root@hadoop chapter4]# kubectl apply -f memleak-pod.yaml
[root@hadoop chapter4]# kubectl get pods
[root@hadoop chapter4]# kubectl describe pods memleak-pod
[root@hadoop chapter4]# kubectl get pods
[root@hadoop chapter4]# kubectl describe pods liveness-http
  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值