1 简介
- stolon架构
keeper:管理PostgreSQL实例,并使其收敛到由sentinel(s)计算出的clusterview。
sentinel:发现并监控keeper,并且计算最理想的clusterview。
proxy:客户端的接入点。它把连接转发到PostgreSQL的master,并且强制关闭到非选举产生的master的连接。
Stolon可以用etcd或者consul作为集群状态存储;本文的部署使用默认的Kubernetes后端(以configmap的形式)来保存集群状态。
2 helm部署stolon postgresql集群
2.1 获取charts
git clone https://github.com/helm/charts.git
cp charts/stable/stolon/values.yaml ./
2.2 修改values.yaml,内容如下
# clusterName:

image:
  repository: sorintlab/stolon
  tag: v0.16.0-pg10  # alternative: master-pg11
  pullPolicy: IfNotPresent

## Add secrets manually via kubectl on kubernetes cluster and reference here
# pullSecrets:
#   - name: "myKubernetesSecret"

# used by create-cluster-job when store.backend is etcd
etcdImage:
  repository: k8s.gcr.io/etcd-amd64
  tag: "2.3.7"  # e.g. 3.3.18 — keep in sync with the cluster's etcd version
  pullPolicy: IfNotPresent

debug: false

# Enable the creation of a shm volume
shmVolume:
  enabled: false

persistence:
  enabled: true
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner. (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  ##
  storageClassName: ""
  accessModes:
    - ReadWriteOnce
  size: 10Gi

rbac:
  create: true

serviceAccount:
  create: true
  # The name of the ServiceAccount to use. If not set and create is true, a name is generated using the fullname template
  name:

# Change 1: read the superuser credentials from an existing secret
superuserSecret:
  name: "pg-su"
  usernameKey: username
  passwordKey: password
# Change 2: read the replication credentials from an existing secret
replicationSecret:
  name: "pg-repl"
  usernameKey: username
  passwordKey: password

superuserPasswordFile:
# superuserUsername: "stolon"  # Change 3: commented out (credentials come from the secret)
## password for the superuser (REQUIRED if superuserSecret and superuserPasswordFile are not set)
# superuserPassword:  # Change 4: commented out

replicationPasswordFile:
# replicationUsername: "repluser"  # Change 5: commented out
## password for the replication user (REQUIRED if replicationSecret and replicationPasswordFile are not set)
# replicationPassword:  # Change 6: commented out

## backend could be one of the following: consul, etcdv2, etcdv3 or kubernetes
store:
  backend: kubernetes
  # endpoints: "http://stolon-consul:8500"
  kubeResourceKind: configmap

# Change 7: extra postgresql.conf parameters
pgParameters:
  max_connections: "1000"

ports:
  stolon:
    containerPort: 5432
  metrics:
    containerPort: 8080

serviceMonitor:
  # When set to true then use a ServiceMonitor to collect metrics
  enabled: false
  # Custom labels to use in the ServiceMonitor to be matched with a specific Prometheus
  labels: {}
  # Set the namespace the ServiceMonitor should be deployed to
  # namespace: default
  # Set how frequently Prometheus should scrape
  # interval: 30s
  # Set timeout for scrape
  # scrapeTimeout: 10s

job:
  autoCreateCluster: true
  autoUpdateClusterSpec: true
  annotations: {}

# Change 8: enable synchronous replication
clusterSpec:
  synchronousReplication: true
  minSynchronousStandbys: 1  # quorum-like replication
  maxSynchronousStandbys: 1  # quorum-like replication
  initMode: new
  # sleepInterval: 1s
  # maxStandbys: 5

## Enable support ssl into postgres, you must specify the certs.
## ref: https://www.postgresql.org/docs/10/ssl-tcp.html
##
tls:
  enabled: false
  rootCa: |-
  serverCrt: |-
  serverKey: |-
  # existingSecret: name-of-existing-secret-to-postgresql

keeper:
  uid_prefix: "keeper"
  replicaCount: 3  # Change 9: three keepers (one master + two standbys)
  annotations: {}
  resources: {}
  priorityClassName: ""
  fsGroup: ""
  service:
    type: ClusterIP
    annotations: {}
    ports:
      keeper:
        port: 5432
        targetPort: 5432
        protocol: TCP
  nodeSelector: {}
  affinity: {}
  tolerations: []
  volumes: []
  volumeMounts: []
  hooks:
    failKeeper:
      enabled: false
  podDisruptionBudget:
    minAvailable: 2
    # maxUnavailable: 1
  extraEnv: []
  # - name: STKEEPER_LOG_LEVEL
  #   value: "info"

proxy:
  replicaCount: 3
  annotations: {}
  resources: {}
  priorityClassName: ""
  service:
    type: ClusterIP
    # loadBalancerIP: ""
    annotations: {}
    ports:
      proxy:
        port: 5432
        targetPort: 5432
        protocol: TCP
  nodeSelector: {}
  affinity: {}
  tolerations: []
  podDisruptionBudget:
    # minAvailable: 1
    # maxUnavailable: 1
  extraEnv: []
  # - name: STPROXY_LOG_LEVEL
  #   value: "info"
  # - name: STPROXY_TCP_KEEPALIVE_COUNT
  #   value: "0"
  # - name: STPROXY_TCP_KEEPALIVE_IDLE
  #   value: "0"
  # - name: STPROXY_TCP_KEEPALIVE_INTERVAL
  #   value: "0"

sentinel:
  replicaCount: 3
  annotations: {}
  resources: {}
  priorityClassName: ""
  nodeSelector: {}
  affinity: {}
  tolerations: []
  podDisruptionBudget:
    # minAvailable: 1
    # maxUnavailable: 1
  extraEnv: []
  # - name: STSENTINEL_LOG_LEVEL
  #   value: "info"

## initdb scripts
## Specify dictionary of scripts to be run at first boot, the entry point script is create_script.sh
## i.e. you can use pgsql to run sql script on the cluster.
##
# initdbScripts:
#   create_script.sh: |
#     #!/bin/sh
#     echo "Do something."

## nodePostStart scripts
## Specify dictionary of scripts to be run at first boot, the entry point script is postStartScript.sh
## i.e. you can create tablespace directory here.
##
# nodePostStartScript:
#   postStartScript.sh: |
#     #!/bin/bash
#     echo "Do something."
2.3 创建对应的secret
# Create the superuser credentials secret referenced by superuserSecret in values.yaml
kubectl create secret generic pg-su \
--namespace test \
--from-literal=username='su_username' --from-literal=password='su_password'
# Create the replication credentials secret referenced by replicationSecret in values.yaml
kubectl create secret generic pg-repl \
--namespace test \
--from-literal=username='repl_username' --from-literal=password='repl_password'
2.4 安装集群
# Install the chart as release "pg" into namespace "test" using the customized values
helm install pg --namespace test -f values.yaml charts/stable/stolon/
# Show su_username's password; unless changed, it is su_password
echo $(kubectl get secret --namespace test pg-su -o jsonpath="{.data.password}" | base64 --decode)
# Log in with psql; 10.254.158.5 is the address of the pg-stolon-proxy service
kubectl -n test exec -it pg-stolon-keeper-0 -- psql --host 10.254.158.5 --port 5432 --username su_username -W -d postgres
# The data directory lives under /stolon-data/postgres inside each keeper
3 高可用测试
3.1 用到的sql语句
# Log in
# NOTE(review): this command omits "-n test" unlike the earlier identical one — verify the namespace
kubectl exec -it pg-stolon-keeper-0 -- psql --host 10.254.158.5 --port 5432 --username su_username -W -d postgres
# Password: su_password
# Besides the login above, the following also works
kubectl -n test exec -it pg-stolon-keeper-0 bash
psql --host 10.254.158.5 --port 5432 --username su_username -W -d postgres
# Check whether this node is the master
select client_addr,sync_state from pg_stat_replication;
select pg_is_in_recovery();
# Create a database and a table
CREATE DATABASE sonar;
\c sonar;
create table mydbtable(name varchar(80),year int);
insert into mydbtable (name,year) values ('xiaoming',23);
select * from mydbtable;
insert into mydbtable (name,year) values ('xiaomi',11);
3.2 创建数据库——查看是否同步
- 创建数据库
- keep0-master
# Connect via keeper-0's pod IP (192.168.86.18) to check its replication role
kubectl -n test exec -it pg-stolon-keeper-0 -- psql --host 192.168.86.18 --port 5432 --username su_username -W -d postgres
可以看到
1)192.168.28.15是同步的standby,192.168.86.17是异步的standby;
2)pg_is_in_recovery=f意味着该节点是master,反之则是slave
- keep1-standby1
- keep2-standby2
3.3 模拟故障
# Simulate a master failure by deleting the current master's pod
kubectl -n test delete po pg-stolon-keeper-0
- 查看sentinel日志
- 原来的master(keep0)成为了standby
- keep1成为了master
- keep2还是slave
3.4 master切换效率
由此可见,master故障时,sentinel的数量对重新选举和数据同步并无显著影响。为节省资源,可以将其副本数设置为1
转载请注明出处,谢谢!