k8s ZooKeeper cluster: pod status is abnormal…
ZooKeeper image: kubebiz/zookeeper
cat start-zookeeper
...
function print_servers() {
    for (( i=1; i<=$SERVERS; i++ ))
    do
        #echo "server.$i=$NAME-$((i-1)).$DOMAIN:$SERVER_PORT:$ELECTION_PORT"
        if [ $ORD -eq $((i-1)) ]; then
            echo "server.$i=0.0.0.0:$SERVER_PORT:$ELECTION_PORT"
        else
            echo "server.$i=$NAME-$((i-1)).$DOMAIN:$SERVER_PORT:$ELECTION_PORT"
        fi
    done
}
...
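For reference, with --servers=3 this function writes one server.N entry per replica into zoo.cfg. On zk-0 (ORD=0) the generated entries should look roughly like the following — the peer hostnames come from the zk-hs headless service, and the default namespace is assumed here:

server.1=0.0.0.0:2888:3888
server.2=zk-1.zk-hs.default.svc.cluster.local:2888:3888
server.3=zk-2.zk-hs.default.svc.cluster.local:2888:3888

The pod's own entry uses 0.0.0.0 so its quorum and election listeners simply bind on all local interfaces, while the other members are addressed through their per-pod DNS records.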
[root@hadoop03 k8s]# cat nufront-zookeeper-dockerfile
FROM kubebiz/zookeeper:latest
## Image maintainer info
#MAINTAINER "lancelot<844256931@qq.com>"
COPY start-zookeeper /usr/bin/
###
[root@hadoop03 k8s]# docker build -f nufront-zookeeper-dockerfile -t nufront/zookeeper:v1 .
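The StatefulSet below pulls hadoop03:5000/nufront/zookeeper:v1 with imagePullPolicy: Always, so the freshly built image presumably has to be tagged and pushed to that local registry first — a minimal sketch, assuming hadoop03:5000 is a registry reachable from every node (and configured as an insecure registry if it has no TLS):

[root@hadoop03 k8s]# docker tag nufront/zookeeper:v1 hadoop03:5000/nufront/zookeeper:v1
[root@hadoop03 k8s]# docker push hadoop03:5000/nufront/zookeeper:v1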
zookeeper.yaml
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-service
spec:
  type: NodePort          # NodePort: expose the service to traffic from outside the cluster
  ports:
  - port: 2181            # service port, used by clients inside the cluster
    targetPort: 2181      # container port the application listens on in the pod
    nodePort: 32181       # port external clients connect to on each node
  selector:
    app: zk
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zk
    spec:
      #affinity:
      #  podAffinity:
      #    requiredDuringSchedulingIgnoredDuringExecution:
      #      - labelSelector:
      #          matchExpressions:
      #            - key: "app"
      #              operator: In
      #              values:
      #                - zk
      #        topologyKey: "kubernetes.io/hostname"
      nodeSelector:
        kubernetes.io/hostname: "hadoop03"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: Always
        #image: "kubebiz/zookeeper"
        image: hadoop03:5000/nufront/zookeeper:v1
        resources:
          requests:
            memory: "512Mi"
            cpu: "0.1"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
      annotations:
        volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
    spec:
      accessModes: [ "ReadWriteOnce" ]
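After applying the manifest, the rollout can be checked roughly like this (names as defined above, default namespace assumed; the last command needs a ZooKeeper client such as zkCli.sh on the machine you run it from):

[root@hadoop03 k8s]# kubectl apply -f zookeeper.yaml
[root@hadoop03 k8s]# kubectl get pods -l app=zk -w
[root@hadoop03 k8s]# kubectl describe pod zk-0        # scheduling events and probe failures show up here
[root@hadoop03 k8s]# kubectl logs zk-0 --previous     # logs of the last run if the pod keeps restarting
[root@hadoop03 k8s]# for i in 0 1 2; do kubectl exec zk-$i -- cat /var/lib/zookeeper/data/myid; done
[root@hadoop03 k8s]# zkCli.sh -server hadoop03:32181  # external access through the NodePort service

Note that with podManagementPolicy: OrderedReady, zk-1 is not created until zk-0 passes its readiness probe, so a single stuck pod blocks the rest of the ensemble.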