简介
参考官方文档
http://kafka.apache.org/intro
准备
Host: centos
Ip: 192.168.122.144
关闭防火墙
# Stop the firewall immediately and keep it disabled across reboots
# (lab setup only — do not do this on a production host).
systemctl stop firewalld
systemctl disable firewalld
关闭selinux
# Put SELinux into permissive mode for the running system ...
setenforce 0
# ... and persist the change in the config file so it survives a reboot.
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
简单地说,要在kubernetes集群上运行kafka集群你需要做以下步骤:
1.安装一个kubernetes集群
2.安装一个本地存储,例如NFS,作为kubernetes的后端存储
3.创建一个storage class
4.部署zookeeper集群
5.部署kafka集群
部署 kubernetes
可以使用熟悉的工具,例如:kubeadm
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
安装NFS
# Install the NFS server bits and export /opt/data to the cluster subnet.
yum -y install nfs-utils.x86_64
# -p: do not fail if the directory already exists.
mkdir -p /opt/data
# Write the export entry. The original step only displayed /etc/exports
# ("cat /etc/exports") and never actually created the export — append it
# instead. Quoted 'EOF' keeps the line literal.
cat >> /etc/exports << 'EOF'
/opt/data/ 192.168.122.0/24(rw,sync,no_root_squash)
EOF
# NOTE(review): on CentOS 7 "nfs" is an alias of nfs-server.service — confirm.
systemctl restart nfs
systemctl enable nfs
在kubernetes集群上部署nfs-client
# Fetch the nfs-client dynamic provisioner sources.
# NOTE(review): kubernetes-incubator/external-storage is archived; the
# provisioner now lives at kubernetes-sigs/nfs-subdir-external-provisioner —
# confirm which one your cluster version needs.
git clone https://github.com/kubernetes-incubator/external-storage.git
cd external-storage/nfs-client
设置授权
如果你的kubernetes集群启用了RBAC,你必须进行授权,如果你的名字空间不是在default下,请编辑deploy/deployment。
# Detect the namespace of the current kubectl context (column 5 of the
# line marked with "*"); fall back to "default" when it is empty.
NS=$(kubectl config get-contexts | awk '/^\*/ {print $5}')
NAMESPACE="${NS:-default}"
# Rewrite every "namespace:" field in the RBAC and deployment manifests
# so the provisioner is installed into the detected namespace.
sed -i'' "s/namespace:.*/namespace: $NAMESPACE/g" ./deploy/rbac.yaml ./deploy/deployment.yaml
kubectl create -f deploy/rbac.yaml
配置NFS-Client provisioner
编辑deploy/deployment.yaml,替换为你的NFS服务器信息
# Excerpt of deploy/deployment.yaml — replace the <YOUR NFS SERVER HOSTNAME>
# placeholders and NFS_PATH with your own NFS server details.
# (Indentation restored: the pasted excerpt had lost all YAML nesting.)
...
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: <YOUR NFS SERVER HOSTNAME>
            - name: NFS_PATH
              value: /var/nfs
      volumes:
        - name: nfs-client-root
          nfs:
            server: <YOUR NFS SERVER HOSTNAME>
            path: /var/nfs
# Deploy the configured nfs-client provisioner.
kubectl apply -f deploy/deployment.yaml
创建storage class
# Create the StorageClass (managed-nfs-storage) backed by the provisioner.
kubectl apply -f deploy/class.yaml
[root@localhost ~]# kubectl get sc
NAME PROVISIONER AGE
managed-nfs-storage fuseim.pri/ifs 22h
部署zookeeper
创建configmap
# Create the ZooKeeper ConfigMap (ensemble membership + tuning knobs) and a
# PodDisruptionBudget that keeps at least 2 of the 3 members available.
# Indentation inside the heredoc restored — the pasted version was flattened
# to column 0, which is not valid YAML nesting. Quoted 'EOF' prevents any
# accidental shell expansion inside the manifest.
cat > zookeeper-config.yaml << 'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
  name: zk-config
data:
  ensemble: "zk-0;zk-1;zk-2"
  jvm.heap: "512M"
  tick: "2000"
  init: "10"
  sync: "5"
  client.cnxns: "60"
  snap.retain: "3"
  purge.interval: "1"
---
# NOTE(review): policy/v1beta1 is removed in Kubernetes 1.25+; use policy/v1
# on modern clusters.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-budget
spec:
  selector:
    matchLabels:
      app: zk
  minAvailable: 2
EOF
kubectl apply -f zookeeper-config.yaml
创建service
# Create the headless Service the ZooKeeper StatefulSet will use for stable
# per-pod DNS names (zk-0.zk-headless, ...).
# Two fixes: heredoc YAML indentation restored (the paste was flattened), and
# "clusterIP: None" added — without it the Service is a normal ClusterIP
# service, not headless, and StatefulSet pod DNS does not work.
cat > zookeeper-svc.yaml << 'EOF'
apiVersion: v1
kind: Service
metadata:
  name: zk-headless
  labels:
    app: zk-headless
spec:
  type: ClusterIP
  clusterIP: None
  ports:
  - port: 2181
    name: client
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  selector:
    app: zk
EOF
kubectl apply -f zookeeper-svc.yaml
部署zookeeper statefulset
cat > zookeeper-sts.yaml << EOF
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: zk
spec:
serviceName: zk-headless
replicas: 3
template:
metadata:
labels:
app: zk
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
containers:
- name: k8szk
imagePullPolicy: IfNotPresent
image: gcr.io/google_samples/k8szk:v1
ports:
- containerPort: 2181
name: client
- containerPort: 2888
name: server
- containerPort: 3888
name: leader-election
env:
- name : ZK_ENSEMBLE
valueFrom:
configMapKeyRef:
name: zk-config
key: ensemble
- name : ZK_HEAP_SIZE
valueFrom:
configMapKeyRef:
name: zk-config
key: jvm.heap
- name : ZK_TICK_TIME
valueFrom:
configMapKeyRef:
name: zk-config
key: tick
- name : ZK_INIT_LIMIT
valueFrom:
configMapKeyRef:
name: zk-config
key: init
- name : ZK_SYNC_LIMIT
valueFrom:
configMapKeyRef:
name: