openstack-helm

Installing openstack-helm

Helm has two major versions, helm2 and helm3. openstack-helm currently supports only helm2, so download helm-v2.17.0:

wget https://get.helm.sh/helm-v2.17.0-linux-amd64.tar.gz
tar -xvf helm-v2.17.0-linux-amd64.tar.gz
cp linux-amd64/helm /usr/local/bin/

helm

Initialize Helm

# Installing the Helm server-side component (Tiller) goes through kubectl, so first make sure kubectl can reach the Kubernetes apiserver
# Initialize Helm (Tiller) and the chart repository
helm init --upgrade -i registry.cn-hangzhou.aliyuncs.com/google_containers/tiller:v2.17.0 --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
helm serve &
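Before moving on it is worth verifying that Tiller came up; a quick sanity check, assuming the default app=helm,name=tiller labels on the Tiller deployment:

# Confirm the Tiller pod is running and the client/server versions match
kubectl get pods -n kube-system -l app=helm,name=tiller
helm version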

# Another point worth noting is RBAC: Kubernetes clusters (1.8.x and later) enable RBAC by default, so Tiller needs a ServiceAccount with the permissions to operate; see Role-based Access Control in the Helm documentation. Create a service-accounts.yaml file defining the Tiller service account and cluster role binding:
vim service-accounts.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: kube-system

kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default
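Note that the binding above grants cluster-admin to the default ServiceAccount; the tiller ServiceAccount defined in service-accounts.yaml still needs to be applied and handed to Tiller. A possible sequence, assuming the file name used above:

kubectl apply -f service-accounts.yaml
# Point the Tiller deployment at the new ServiceAccount
helm init --upgrade --service-account tiller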

Download openstack-helm

cd /opt
git clone https://github.com/openstack/openstack-helm.git
git clone https://github.com/openstack/openstack-helm-infra.git

Install the OpenStack components

Install the dependency packages
yum -y install make jq git curl

Change the OpenStack release to install

cd /opt/openstack-helm
git diff tools/deployment/common/setup-client.sh
-  -c${UPPER_CONSTRAINTS_FILE:=https://releases.openstack.org/constraints/upper/${OPENSTACK_RELEASE:-stein}} \
+  -c${UPPER_CONSTRAINTS_FILE:=https://releases.openstack.org/constraints/upper/${OPENSTACK_RELEASE:-ussuri}} \
 
 
git diff tools/deployment/common/get-values-overrides.sh
-: "${OPENSTACK_RELEASE:="train"}"
+: "${OPENSTACK_RELEASE:="ussuri"}"

Create the OpenStack clients and Kubernetes RBAC rules

./tools/deployment/developer/common/020-setup-client.sh

During deployment, Helm relies on node labels for scheduling, so label the nodes:

kubectl label nodes 10.2.11.176 nginx-ingress=enabled
kubectl label nodes 10.2.11.177 nginx-ingress=enabled
kubectl label nodes 10.2.11.176 openstack-control-plane=enabled
kubectl label nodes 10.2.11.177 openstack-control-plane=enabled
kubectl label nodes 10.2.11.178 openstack-control-plane=enabled
kubectl label nodes 10.2.11.176 openstack-compute-node=enabled
kubectl label nodes 10.2.11.177 openstack-compute-node=enabled
kubectl label nodes 10.2.11.178 openstack-compute-node=enabled
kubectl label nodes 10.2.11.176 openvswitch=enabled
kubectl label nodes 10.2.11.177 openvswitch=enabled
kubectl label nodes 10.2.11.178 openvswitch=enabled
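To confirm the labels were applied:

kubectl get nodes --show-labels | grep -E 'nginx-ingress|openstack-control-plane|openstack-compute-node|openvswitch'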

Install the ingress controller

# Replace the ingress images; the original registries are blocked and the images cannot be pulled
cd /opt/openstack-helm-infra/
git diff ingress/values.yaml
-    ingress: k8s.gcr.io/ingress-nginx/controller:v0.42.0
-    ingress_module_init: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
-    ingress_routed_vip: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
-    error_pages: k8s.gcr.io/defaultbackend:1.4
+    ingress: docker.io/willdockerhub/ingress-nginx-controller:v0.42.0
+    ingress_module_init: docker.io/openstackhelm/neutron:ussuri-ubuntu_bionic
+    ingress_routed_vip: docker.io/openstackhelm/neutron:ussuri-ubuntu_bionic
+    error_pages: docker.io/chenliujin/defaultbackend:1.4
 labels:
   server:
-    node_selector_key: openstack-control-plane
+    node_selector_key: nginx-ingress
     node_selector_value: enabled
   error_server:
-    node_selector_key: openstack-control-plane
+    node_selector_key: nginx-ingress
     node_selector_value: enabled
-    addr: 172.18.0.1/11
+    addr: 10.221.0.1/11

Run the deployment script

OSH_DEPLOY_MULTINODE=True ./tools/deployment/component/common/ingress.sh
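Once the script completes, the ingress pods should be running; a generic check:

kubectl get pods --all-namespaces | grep ingress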

Install Ceph

Modify the openstack-helm Ceph script

git diff tools/deployment/multinode/030-ceph.sh
diff --git a/tools/deployment/multinode/030-ceph.sh b/tools/deployment/multinode/030-ceph.sh
index b3fa8db2..f7cc9ed8 100755
--- a/tools/deployment/multinode/030-ceph.sh
+++ b/tools/deployment/multinode/030-ceph.sh
@@ -37,13 +37,13 @@ network:
   cluster: ${CEPH_CLUSTER_NETWORK}
 deployment:
   storage_secrets: true
-  ceph: true
-  rbd_provisioner: true
-  csi_rbd_provisioner: true
+  ceph: false
+  rbd_provisioner: false
+  csi_rbd_provisioner: false
   cephfs_provisioner: false
   client_secrets: false
 bootstrap:
-  enabled: true
+  enabled: false
 conf:
   ceph:
     global:
@@ -74,7 +74,7 @@ manifests:
 EOF
 
 : ${OSH_INFRA_PATH:="../openstack-helm-infra"}
-for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
+for CHART in ceph-mon ; do
   make -C ${OSH_INFRA_PATH} ${CHART}
   helm upgrade --install ${CHART} ${OSH_INFRA_PATH}/${CHART} \
     --namespace=ceph \
@@ -82,14 +82,4 @@ for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
     ${OSH_EXTRA_HELM_ARGS} \
     ${OSH_EXTRA_HELM_ARGS_CEPH_DEPLOY}
 
-  #NOTE: Wait for deploy
-  ./tools/deployment/common/wait-for-pods.sh ceph 1200
-
-  #NOTE: Validate deploy
-  MON_POD=$(kubectl get pods \
-    --namespace=ceph \
-    --selector="application=ceph" \
-    --selector="component=mon" \
-    --no-headers | awk '{ print $1; exit }')
-  kubectl exec -n ceph ${MON_POD} -- ceph -s
 done
 
git diff tools/deployment/multinode/kube-node-subnet.sh
diff --git a/tools/deployment/multinode/kube-node-subnet.sh b/tools/deployment/multinode/kube-node-subnet.sh
index 08f069a8..9ed56742 100755
--- a/tools/deployment/multinode/kube-node-subnet.sh
+++ b/tools/deployment/multinode/kube-node-subnet.sh
@@ -19,7 +19,6 @@ kubectl get nodes -o json | jq -r '.items[].status.addresses[] | select(.type=="
 function run_and_log_ipcalc {
   POD_NAME="tmp-$(cat /dev/urandom | env LC_CTYPE=C tr -dc a-z | head -c 5; echo)"
   kubectl run ${POD_NAME} \
-    --generator=run-pod/v1 \
     --wait \
     --image ${UTILS_IMAGE} \
     --restart=Never \

Change the Ceph image names in openstack-helm-infra

git diff ceph-client/values.yaml
diff --git a/ceph-client/values.yaml b/ceph-client/values.yaml
index 92c31611..a6920dcb 100644
--- a/ceph-client/values.yaml
+++ b/ceph-client/values.yaml
@@ -24,11 +24,11 @@ release_group: null
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
-    ceph_mds: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
+    ceph_mds: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/library/docker:17.07.0'
 
git diff ceph-mon/values.yaml
diff --git a/ceph-mon/values.yaml b/ceph-mon/values.yaml
index f060c13a..7d284d67 100644
--- a/ceph-mon/values.yaml
+++ b/ceph-mon/values.yaml
@@ -23,10 +23,10 @@ deployment:
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
-    ceph_mon: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
+    ceph_mon: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/library/docker:17.07.0'
 
git diff ceph-osd/values.yaml
diff --git a/ceph-osd/values.yaml b/ceph-osd/values.yaml
index 7277a73c..906810e3 100644
--- a/ceph-osd/values.yaml
+++ b/ceph-osd/values.yaml
@@ -19,9 +19,9 @@
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_osd: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+    ceph_osd: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/library/docker:17.07.0'
 
git diff ceph-provisioners/values.yaml
diff --git a/ceph-provisioners/values.yaml b/ceph-provisioners/values.yaml
index b4ab0a9d..a6f9c0b7 100644
--- a/ceph-provisioners/values.yaml
+++ b/ceph-provisioners/values.yaml
@@ -30,10 +30,10 @@ release_group: null
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
     ceph_cephfs_provisioner: 'docker.io/openstackhelm/ceph-cephfs-provisioner:ubuntu_bionic-20200521'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
-    ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:change_770201_ubuntu_bionic-20210113'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
+    ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:latest-ubuntu_bionic'
     csi_provisioner: 'quay.io/k8scsi/csi-provisioner:v1.6.0'
     csi_snapshotter: 'quay.io/k8scsi/csi-snapshotter:v2.1.1'
     csi_attacher: 'quay.io/k8scsi/csi-attacher:v2.1.1'
 
git diff ceph-rgw/values.yaml
diff --git a/ceph-rgw/values.yaml b/ceph-rgw/values.yaml
index a5147856..21aedfec 100644
--- a/ceph-rgw/values.yaml
+++ b/ceph-rgw/values.yaml
@@ -24,13 +24,13 @@ release_group: null
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
-    ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
+    ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/library/docker:17.07.0'
-    rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
-    rgw_placement_targets: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+    rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
+    rgw_placement_targets: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
     ks_endpoints: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
     ks_service: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
     ks_user: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'

Modify the ceph-mon template file

git diff ceph-mon/templates/bin/mon/_start.sh.tpl
diff --git a/ceph-mon/templates/bin/mon/_start.sh.tpl b/ceph-mon/templates/bin/mon/_start.sh.tpl
index b045a39e..96745c2b 100644
--- a/ceph-mon/templates/bin/mon/_start.sh.tpl
+++ b/ceph-mon/templates/bin/mon/_start.sh.tpl
@@ -28,49 +28,6 @@ if [[ -z "$CEPH_PUBLIC_NETWORK" ]]; then
   exit 1
 fi
 
-if [[ -z "$MON_IP" ]]; then
-  echo "ERROR- MON_IP must be defined as the IP address of the monitor"
-  exit 1
-fi
-
-if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
-    MON_NAME=${POD_NAME}
-else
-    MON_NAME=${NODE_NAME}
-fi
-MON_DATA_DIR="/var/lib/ceph/mon/${CLUSTER}-${MON_NAME}"
-MONMAP="/etc/ceph/monmap-${CLUSTER}"
-
-# Make the monitor directory
-/bin/sh -c "mkdir -p \"${MON_DATA_DIR}\""
-
-function get_mon_config {
-  # Get fsid from ceph.conf
-  local fsid=$(ceph-conf --lookup fsid -c /etc/ceph/${CLUSTER}.conf)
-
-  timeout=10
-  MONMAP_ADD=""
-
-  while [[ -z "${MONMAP_ADD// }" && "${timeout}" -gt 0 ]]; do
-    # Get the ceph mon pods (name and IP) from the Kubernetes API. Formatted as a set of monmap params
-    if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
-        MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--addv {{`{{.metadata.name}}`}} [v1:{{`{{.status.podIP}}`}}:${MON_PORT},v2:{{`{{.status.podIP}}`}}:${MON_PORT_V2}] {{`{{end}}`}} {{`{{end}}`}}")
-    else
-        MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--addv {{`{{.spec.nodeName}}`}} [v1:{{`{{.status.podIP}}`}}:${MON_PORT},v2:{{`{{.status.podIP}}`}}:${MON_PORT_V2}] {{`{{end}}`}} {{`{{end}}`}}")
-    fi
-    (( timeout-- ))
-    sleep 1
-  done
-
-  if [[ -z "${MONMAP_ADD// }" ]]; then
-      exit 1
-  fi
-
-  # Create a monmap with the Pod Names and IP
-  monmaptool --create ${MONMAP_ADD} --fsid ${fsid} ${MONMAP} --clobber
-}
-
-get_mon_config
 
 # If we don't have a monitor keyring, this is a new monitor
 if [ ! -e "${MON_DATA_DIR}/keyring" ]; then
@@ -81,33 +38,9 @@ if [ ! -e "${MON_DATA_DIR}/keyring" ]; then
     cp -vf ${MON_KEYRING}.seed ${MON_KEYRING}
   fi
 
-  if [ ! -e ${MONMAP} ]; then
-    echo "ERROR- ${MONMAP} must exist. You can extract it from your current monitor by running 'ceph mon getmap -o ${MONMAP}' or use a KV Store"
-    exit 1
-  fi
-
   # Testing if it's not the first monitor, if one key doesn't exist we assume none of them exist
   for KEYRING in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ${ADMIN_KEYRING}; do
     ceph-authtool ${MON_KEYRING} --import-keyring ${KEYRING}
   done
-
-  # Prepare the monitor daemon's directory with the map and keyring
-  ceph-mon --setuser ceph --setgroup ceph --cluster "${CLUSTER}" --mkfs -i ${MON_NAME} --inject-monmap ${MONMAP} --keyring ${MON_KEYRING} --mon-data "${MON_DATA_DIR}"
-else
-  echo "Trying to get the most recent monmap..."
-  # Ignore when we timeout, in most cases that means the cluster has no quorum or
-  # no mons are up and running yet
-  timeout 5 ceph --cluster "${CLUSTER}" mon getmap -o ${MONMAP} || true
-  ceph-mon --setuser ceph --setgroup ceph --cluster "${CLUSTER}" -i ${MON_NAME} --inject-monmap ${MONMAP} --keyring ${MON_KEYRING} --mon-data "${MON_DATA_DIR}"
-  timeout 7 ceph --cluster "${CLUSTER}" mon add "${MON_NAME}" "${MON_IP}:${MON_PORT_V2}" || true
 fi
 
-# start MON
-exec /usr/bin/ceph-mon \
-  --cluster "${CLUSTER}" \
-  --setuser "ceph" \
-  --setgroup "ceph" \
-  -d \
-  -i ${MON_NAME} \
-  --mon-data "${MON_DATA_DIR}" \
-  --public-addr "${MON_IP}:${MON_PORT_V2}"

Modify the ceph-client-admin-keyring secret
Base64-encode the Ceph admin keyring and update ceph.client.admin.keyring accordingly:

kubectl edit secret ceph-client-admin-keyring -n ceph

apiVersion: v1
data:
  keyring: Y2xpZW50LmFkbWluCiAgICAgICAga2V5OiBBUUFoZXR0ZEFBQUFBQkFBRUVSL3F1S0p5bGhFRDZ2UXg3YzhWdz09CiAgICAgICAgY2FwczogW21kc10gYWxsb3cgKgogICAgICAgIGNhcHM6IFttZ3JdIGFsbG93ICoKICAgICAgICBjYXBzOiBbbW9uXSBhbGxvdyAqCiAgICAgICAgY2FwczogW29zZF0gYWxsb3cgKg==
kind: Secret
metadata:
  name: ceph-client-admin-keyring
  namespace: ceph
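The base64 strings above are just encodings of the Ceph admin keyring content and the admin key. Assuming a working Ceph admin node, they can be produced along these lines (the keyring text must match whatever format the chart templates expect):

# Key only, used by pvc-ceph-client-key below
ceph auth get-key client.admin | base64
# Full keyring content for the ceph-client-admin-keyring secret
ceph auth get client.admin | base64 -w0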
tee /tmp/pvc-ceph-client-key.yaml <<EOF
apiVersion: v1
data:
  key: QVFBaGV0dGRBQUFBQUJBQUVFUi9xdUtKeWxoRUQ2dlF4N2M4Vnc9PQ==
kind: Secret
metadata:
  labels:
    application: ceph
    component: rbd
    release_group: ceph-openstack-config
  name: pvc-ceph-client-key
  namespace: openstack
type: kubernetes.io/rbd
EOF

kubectl apply -f /tmp/pvc-ceph-client-key.yaml
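Confirm the secret landed in the openstack namespace:

kubectl get secret pvc-ceph-client-key -n openstack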

Run the deployment script

# Prerequisites: 1. label the nodes  2. change the image names in the ceph-related values.yaml files  3. create the ceph-mon-discovery Endpoints
./tools/deployment/multinode/030-ceph.sh

openstack-ceph

Create the ceph-mon-discovery Endpoints

tee /tmp/ceph-mon-discovery.yaml <<EOF
apiVersion: v1
kind: Endpoints
metadata:
  labels:
    app: ceph
    mon_cluster: ceph
    rook_cluster: ceph
  name: ceph-mon-discovery
  namespace: ceph
subsets:
- addresses:
  - ip: 10.2.11.176
    nodeName: k8s-1
    targetRef:
      kind: Pod
      namespace: ceph
  - ip: 10.2.11.177
    nodeName: k8s-2
    targetRef:
      kind: Pod
      namespace: ceph
  - ip: 10.2.11.178
    nodeName: k8s-3
    targetRef:
      kind: Pod
      namespace: ceph

  ports:
  - name: mon-msgr2
    port: 3300
    protocol: TCP
  - name: mon
    port: 6789
    protocol: TCP
EOF
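The Endpoints manifest is then applied like any other resource (assuming the file path used above):

kubectl apply -f /tmp/ceph-mon-discovery.yaml
kubectl get endpoints ceph-mon-discovery -n ceph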

# Configure the Ceph/OpenStack integration
./tools/deployment/multinode/040-ceph-ns-activate.sh

MariaDB

Modify the openstack-helm script so that MariaDB uses a Kubernetes volume

git diff tools/deployment/multinode/050-mariadb.sh
diff --git a/tools/deployment/multinode/050-mariadb.sh b/tools/deployment/multinode/050-mariadb.sh
index 5ba6d44a..2d50dd14 100755
--- a/tools/deployment/multinode/050-mariadb.sh
+++ b/tools/deployment/multinode/050-mariadb.sh
@@ -11,8 +11,6 @@ make -C ${HELM_CHART_ROOT_PATH} mariadb
 : ${OSH_EXTRA_HELM_ARGS:=""}
 helm upgrade --install mariadb ${HELM_CHART_ROOT_PATH}/mariadb \
     --namespace=openstack \
-    --set volume.use_local_path_for_single_pod_cluster.enabled=true \
-    --set volume.enabled=false \
     --values=/tmp/mariadb.yaml \
     ${OSH_EXTRA_HELM_ARGS} \
     ${OSH_EXTRA_HELM_ARGS_MARIADB}

Change the MariaDB image names in the openstack-helm-infra chart

git diff mariadb/values.yaml
diff --git a/mariadb/values.yaml b/mariadb/values.yaml
index dcc905dc..0885b266 100644
--- a/mariadb/values.yaml
+++ b/mariadb/values.yaml
@@ -21,8 +21,8 @@ release_group: null
 images:
   tags:
     mariadb: docker.io/openstackhelm/mariadb:latest-ubuntu_focal
-    ingress: k8s.gcr.io/ingress-nginx/controller:v0.42.0
-    error_pages: k8s.gcr.io/defaultbackend:1.4
+    ingress: docker.io/willdockerhub/ingress-nginx-controller:v0.42.0
+    error_pages: docker.io/chenliujin/defaultbackend:1.4
     prometheus_create_mysql_user: docker.io/library/mariadb:10.5.9-focal
     prometheus_mysql_exporter: docker.io/prom/mysqld-exporter:v0.12.1
     prometheus_mysql_exporter_helm_tests: docker.io/openstackhelm/heat:newton-ubuntu_xenial

Run the deployment script

./tools/deployment/multinode/050-mariadb.sh

Issues

# MariaDB is a stateful service and needs a volume, so create a Ceph RBD block device first; see the official docs: https://docs.ceph.com/en/latest/rbd/rbd-kubernetes/
# During the helm install the MariaDB pod may never reach Ready; the fix is as follows:
kubectl get cm -n openstack
kubectl delete cm mariadb-mariadb-state -n openstack
helm delete mariadb --purge
# Then redeploy
./tools/deployment/multinode/050-mariadb.sh
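After redeploying, confirm the PVC bound and the MariaDB pods reach Ready (a generic check, assuming the chart's usual application=mariadb label):

kubectl get pvc -n openstack | grep mariadb
kubectl get pods -n openstack -l application=mariadb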

RabbitMQ

Run the installation script

./tools/deployment/multinode/060-rabbitmq.sh

Troubleshooting:

vim /var/lib/kubelet/config.yaml

Change clusterDomain: cluster.local. to cluster.local (drop the trailing dot), then restart the kubelet service, as sketched below.
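A sketch of the change and the restart:

# /var/lib/kubelet/config.yaml
#   before: clusterDomain: cluster.local.
#   after:  clusterDomain: cluster.local
systemctl restart kubelet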

Memcached

Run the installation script

./tools/deployment/multinode/070-memcached.sh

Keystone

Run the installation script

./tools/deployment/multinode/080-keystone.sh

Glance

Change the Glance storage backend to rbd

git diff tools/deployment/multinode/100-glance.sh
diff --git a/tools/deployment/multinode/100-glance.sh b/tools/deployment/multinode/100-glance.sh
index 20bacd23..beba0683 100755
--- a/tools/deployment/multinode/100-glance.sh
+++ b/tools/deployment/multinode/100-glance.sh
@@ -23,7 +23,7 @@ make glance
 
 #NOTE: Deploy command
 : ${OSH_EXTRA_HELM_ARGS:=""}
-: ${GLANCE_BACKEND:="pvc"}
+: ${GLANCE_BACKEND:="rbd"}

Run the installation script

./tools/deployment/multinode/100-glance.sh
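With the OpenStack client installed earlier, a quick check that Glance is answering (assuming the openstack_helm cloud entry created by the client setup script):

export OS_CLOUD=openstack_helm
openstack image list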

Cinder

Change the crush_rule

git diff tools/deployment/multinode/110-cinder.sh
diff --git a/tools/deployment/multinode/110-cinder.sh b/tools/deployment/multinode/110-cinder.sh
index 55f3af0a..a4136c6d 100755
--- a/tools/deployment/multinode/110-cinder.sh
+++ b/tools/deployment/multinode/110-cinder.sh
@@ -26,19 +26,19 @@ conf:
     pools:
       backup:
         replication: 1
-        crush_rule: same_host
+        crush_rule: replicated_rule
         chunk_size: 8
         app_name: cinder-backup
       # default pool used by rbd1 backend
       cinder.volumes:
         replication: 1
-        crush_rule: same_host
+        crush_rule: replicated_rule
         chunk_size: 8
         app_name: cinder-volume
       # secondary pool used by rbd2 backend
       cinder.volumes.gold:
         replication: 1
-        crush_rule: same_host
+        crush_rule: replicated_rule
         chunk_size: 8
         app_name: cinder-volume
   backends:

Run the installation script

./tools/deployment/multinode/110-cinder.sh
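A similar check for Cinder:

export OS_CLOUD=openstack_helm
openstack volume service list
openstack volume type list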

OpenvSwitch

Run the installation script

kubectl label nodes k8s-node-181 openvswitch=enabled
kubectl label nodes k8s-node-182 openvswitch=enabled
kubectl label nodes k8s-node-183 openvswitch=enabled

./tools/deployment/multinode/120-openvswitch.sh

Libvirt

Run the installation script

# The libvirt pod stays in Init state until the neutron-ovs-agent pod is up
./tools/deployment/multinode/130-libvirt.sh

After the libvirt pod is up, create the libvirt secret; this step could also be handled by an init container.

# The secret is not defined in the stock container, so creating an instance would fail; create it manually.
virsh secret-list
 UUID                                  Usage
--------------------------------------------------------------------------------
# Exec into the container
kubectl exec -it libvirt-libvirt-default-2jwf6  -n openstack bash
# Generate the secret XML
cat << EOF > /etc/ceph.xml
<secret ephemeral="no" private="no">
<uuid>457eb676-33da-42ec-9a8c-9293d545c337</uuid>
<usage type="ceph">
<name>client.cinder secret</name>
</usage>
</secret>
EOF
virsh secret-define --file /etc/ceph.xml
# The secret UUID is hard-coded; the base64 value is the client.cinder key from Ceph
virsh secret-set-value --secret 457eb676-33da-42ec-9a8c-9293d545c337 --base64 AQAi3RRhsMJxGhAAWMZvqgo62ZZ4kUd30LvukA==
# Restart the libvirt container
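The stored value can be read back to confirm it was set (same UUID as above):

virsh secret-list
virsh secret-get-value --secret 457eb676-33da-42ec-9a8c-9293d545c337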

Compute Kit (Nova and Neutron)

Modify the configuration

git diff tools/deployment/multinode/140-compute-kit.sh
diff --git a/tools/deployment/multinode/140-compute-kit.sh b/tools/deployment/multinode/140-compute-kit.sh
index 2fec7662..4d4367b7 100755
--- a/tools/deployment/multinode/140-compute-kit.sh
+++ b/tools/deployment/multinode/140-compute-kit.sh
@@ -16,7 +16,7 @@ set -xe
 : ${RUN_HELM_TESTS:="yes"}
 
 export OS_CLOUD=openstack_helm
-CEPH_ENABLED=false
+CEPH_ENABLED=true
 if openstack service list -f value -c Type | grep -q "^volume" && \
     openstack volume type list -f value -c Name | grep -q "rbd"; then
   CEPH_ENABLED=true
@@ -118,7 +118,7 @@ make neutron
 tee /tmp/neutron.yaml << EOF
 network:
   interface:
-    tunnel: docker0
+    tunnel: eth1

nova-bootstrap pulls an image from the public internet; pre-load the image to avoid pull failures

wget http://10.2.11.2/dd_files/Kubernetes/hyperkube-amd64.tgz
docker load -i hyperkube-amd64.tgz

Run the installation script

./tools/deployment/multinode/140-compute-kit.sh

During installation the neutron release may fail; if so, delete it and run the deployment again

helm list
neutron                 1               Thu Aug 12 16:51:23 2021        FAILED          neutron-0.2.8                   v1.0.0          openstack
# Delete the failed release
helm delete neutron --purge

Re-run the install

helm upgrade --install neutron ./neutron --namespace=openstack --values=/tmp/neutron.yaml --values=../openstack-helm/neutron/values_overrides/ussuri-ubuntu_bionic.yaml
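As elsewhere in the deployment scripts, the wait-for-pods helper from openstack-helm can be used to wait until the neutron pods settle (a sketch; adjust the path to wherever the repo is checked out):

../openstack-helm/tools/deployment/common/wait-for-pods.sh openstack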

Horizon

Run the installation script

./tools/deployment/multinode/085-horizon.sh

After the dashboard is installed, its resources may be unreachable. The fix is as follows:
Install traefik-ingress

wget http://10.2.11.2/dd_files/Kubernetes/traefik.tar.gz
tar -xvf traefik.tar.gz
cd traefik
# Create the resources
kubectl apply -f .
ingress.networking.k8s.io/horizon created
Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress
ingress.extensions/horizon-a created
serviceaccount/traefik-ingress-controller created
deployment.apps/traefik-ingress-controller created
service/traefik-ingress-service created
clusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created
clusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created
service/traefik-web-ui created
ingress.extensions/traefik-web-ui created
 
# Check the pod
kubectl get pod -n kube-system
traefik-ingress-controller-6dc7585cdc-97vhj   1/1     Running   0          2s
 
# Check the ingress
kubectl get ingress -n kube-system
NAME             CLASS    HOSTS                 ADDRESS   PORTS   AGE
traefik-web-ui   <none>   traefik-ui.minikube             80      2m7s

Verification

List the ingresses

kubectl get ingress -n openstack
NAME                          CLASS    HOSTS                                                                                               ADDRESS                               PORTS   AGE
cinder                      <none>   cinder,cinder.openstack,cinder.openstack.svc.cluster.local                                        10.2.11.177,10.2.11.178               80      50m
glance                      <none>   glance,glance.openstack,glance.openstack.svc.cluster.local                                        10.2.11.177,10.2.11.178               80      57m
horizon                     <none>   horizon,horizon.openstack,horizon.openstack.svc.cluster.local                                     10.2.11.177,10.2.11.178               80      99s
keystone                    <none>   keystone,keystone.openstack,keystone.openstack.svc.cluster.local                                  10.2.11.177,10.2.11.178               80      72m
metadata                    <none>   metadata,metadata.openstack,metadata.openstack.svc.cluster.local                                  10.2.11.177,10.2.11.178               80      34m
neutron                     <none>   neutron,neutron.openstack,neutron.openstack.svc.cluster.local                                     10.2.11.177,10.2.11.178               80      9m38s
nova                        <none>   nova,nova.openstack,nova.openstack.svc.cluster.local                                              10.2.11.177,10.2.11.178               80      34m
novncproxy                  <none>   novncproxy,novncproxy.openstack,novncproxy.openstack.svc.cluster.local                            10.2.11.177,10.2.11.178               80      34m
openstack-ingress-openstack <none>   *.openstack.svc.cluster.local                                                                     10.2.11.176,10.2.11.177,10.2.11.178   80      133m
placement                   <none>   placement,placement.openstack,placement.openstack.svc.cluster.local                               10.2.11.177,10.2.11.178               80      34m
rabbitmq-mgr-7b1733         <none>   rabbitmq-mgr-7b1733,rabbitmq-mgr-7b1733.openstack,rabbitmq-mgr-7b1733.openstack.svc.cluster.local 10.2.11.177,10.2.11.178               80      86m

Configure /etc/hosts resolution

10.2.11.176  k8s-1
10.2.11.177  k8s-2 cinder.openstack.svc.cluster.local glance.openstack.svc.cluster.local horizon.openstack.svc.cluster.local keystone.openstack.svc.cluster.local
10.2.11.178  k8s-3 neutron.openstack.svc.cluster.local nova.openstack.svc.cluster.local placement.openstack.svc.cluster.local

Configure the admin-rc environment variables

cat admin.rc
export OS_USERNAME=admin
export OS_PASSWORD=password
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_DOMAIN_NAME=default
export OS_AUTH_URL=http://keystone.openstack.svc.cluster.local/v3
export OS_IDENTITY_API_VERSION=3
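The file is then sourced before using the CLI:

source admin.rc
openstack endpoint list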

Configure resolution on Windows in C:\Windows\System32\drivers\etc\hosts:

10.2.11.178 horizon.openstack.svc.cluster.local
10.2.11.178 novncproxy.openstack.svc.cluster.local
10.2.11.178 traefik-ui.minikube

Modify the VNC ingress

kubectl edit ingress novncproxy -n openstack
Delete the data under metadata/annotations.