References

KubeSphere 3.x to 4.x Data Migration
KubeSphere v4 Installation Guide
Enabling the Cluster Gateway

Upgrade Notes

  • Uninstalling KubeSphere removes all of its components and data, so proceed with caution. The migration in this document backs up only the following data: users, workspaces, and access-control authorization rules (built-in roles only).
  • Uninstalling KubeSphere does not affect any resources other than KubeSphere itself and its components.
  • If you use the cluster gateway, back up its configuration first (a backup sketch follows this list).
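
A minimal sketch for capturing the v3 gateway configuration before it is wiped, assuming the gateway is managed through the gateways.gateway.kubesphere.io and nginxes.gateway.kubesphere.io CRDs (the output file names are illustrative):

kubectl get gateways.gateway.kubesphere.io -A -o yaml > backup-gateways.yaml
kubectl get nginxes.gateway.kubesphere.io -A -o yaml > backup-nginxes.yaml 2>/dev/null || true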

Upgrade

Run the following commands on a node where kubectl is available:

yum install -y jq
mkdir -p kubesphere-v4-upgrade
cd kubesphere-v4-upgrade

Create a script file named 1_backup.sh:

#!/bin/bash
# Back up users, converted to the v4 iam.kubesphere.io/v1beta1 API version
kubectl get users.iam.kubesphere.io -o json | jq '.items[].apiVersion = "iam.kubesphere.io/v1beta1"' > backup-users.json
# Back up global role bindings, rewriting subject apiGroups for v4
kubectl get globalrolebindings.iam.kubesphere.io -o json | jq '.items[]?.apiVersion = "iam.kubesphere.io/v1beta1" | .items[]?.subjects[]?.apiGroup = "iam.kubesphere.io"' > backup-globalrolebindings.json
# Back up cluster role bindings that KubeSphere created for users
kubectl get clusterrolebindings -l iam.kubesphere.io/user-ref -o json | jq '.items[]?.apiVersion = "iam.kubesphere.io/v1beta1" | .items[]?.subjects[]?.apiGroup = "iam.kubesphere.io" | .items[]?.roleRef.apiGroup = "iam.kubesphere.io"' > backup-clusterrolebindings.json
# Back up namespaced role bindings that KubeSphere created for users
kubectl get rolebindings -A -l iam.kubesphere.io/user-ref -o json | jq '.items[]?.apiVersion = "iam.kubesphere.io/v1beta1" | .items[]?.subjects[]?.apiGroup = "iam.kubesphere.io" | .items[]?.roleRef.apiGroup = "iam.kubesphere.io"' > backup-rolebindings.json
# Back up workspace namespaces, marking them as managed so v4 re-adopts them
kubectl get namespace -l kubesphere.io/workspace -o json | jq '.items[]?.metadata.labels["kubesphere.io/managed"] = "true"' > backup-namespaces.json
## Run on the host cluster only: back up workspace templates (the v3-only networkIsolation field is dropped)
kubectl get workspacetemplates.tenant.kubesphere.io -o json | jq '.items[].apiVersion = "tenant.kubesphere.io/v1beta1" | del(.items[].spec.template.spec.networkIsolation)' > backup-workspacetemplates.json
## Run on the host cluster only: back up workspace role bindings
kubectl get workspacerolebindings.iam.kubesphere.io -o json | jq '.items[]?.apiVersion = "iam.kubesphere.io/v1beta1" | .items[]?.subjects[]?.apiGroup = "iam.kubesphere.io" | .items[]?.roleRef.apiGroup = "iam.kubesphere.io" | del(.items[].metadata.resourceVersion) | del(.items[].metadata.ownerReferences)' > backup-workspacerolebindings.json
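
Before tearing anything down, it is worth confirming that each backup file parsed and actually contains items; a minimal check against the file names produced above:

for f in backup-*.json; do
  printf '%s: %s item(s)\n' "$f" "$(jq '.items | length' "$f")"
done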

Create a script file named 2_uninstall_v3.sh:

#!/bin/bash
############################################################################################################
# This script uninstalls KubeSphere from the cluster
#
# Note: before running it, make sure the current cluster has been unbound from the host cluster
############################################################################################################

set -x

# Remove the workspace label (and owner references) from every namespace in the cluster
kubectl get ns -l kubesphere.io/workspace -o name | xargs -I {} bash -c "kubectl label {} kubesphere.io/workspace- && kubectl patch {} -p '{\"metadata\":{\"ownerReferences\":[]}}' --type=merge"

# Remove the kubefed label (and owner references) from every namespace in the cluster
kubectl get ns -l kubefed.io/managed -o name | xargs -I {} bash -c "kubectl label {} kubefed.io/managed- && kubectl patch {} -p '{\"metadata\":{\"ownerReferences\":[]}}' --type=merge"

# Remove workspace and workspacetemplate resources from the cluster
kubectl get workspacetemplate -A -o name | xargs -I {} kubectl patch {} -p '{"metadata":{"ownerReferences":[]}}' --type=merge
kubectl get workspace -A -o name | xargs -I {} kubectl patch {} -p '{"metadata":{"ownerReferences":[]}}' --type=merge

kubectl get workspacetemplate -A -o name | xargs -I {} kubectl delete {}
kubectl get workspace -A -o name | xargs -I {} kubectl delete {}

# Delete KubeSphere role-template clusterroles
delete_cluster_roles() {
  for role in `kubectl get clusterrole -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl delete clusterrole $role 2>/dev/null
  done
}

delete_cluster_roles

# Delete KubeSphere role-template clusterrolebindings
delete_cluster_role_bindings() {
  for rolebinding in `kubectl get clusterrolebindings -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl delete clusterrolebindings $rolebinding 2>/dev/null
  done
}
delete_cluster_role_bindings

# Delete validatingwebhookconfigurations
for webhook in ks-events-admission-validate users.iam.kubesphere.io network.kubesphere.io validating-webhook-configuration resourcesquotas.quota.kubesphere.io
do
  kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
done

# Delete mutatingwebhookconfigurations
for webhook in ks-events-admission-mutate logsidecar-injector-admission-mutate mutating-webhook-configuration
do
  kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
done

# Uninstall ks-core
helm del -n kubesphere-system ks-core
helm del -n kubesphere-system ks-redis &> /dev/null || true
kubectl delete pvc -n kubesphere-system -l app=redis-ha --ignore-not-found || true
kubectl delete deploy -n kubesphere-system -l app.kubernetes.io/managed-by!=Helm --field-selector metadata.name=redis --ignore-not-found || true
kubectl delete svc -n kubesphere-system -l app.kubernetes.io/managed-by!=Helm --field-selector metadata.name=redis --ignore-not-found || true
kubectl delete secret -n kubesphere-system -l app.kubernetes.io/managed-by!=Helm --field-selector metadata.name=redis-secret --ignore-not-found || true
kubectl delete cm -n kubesphere-system -l app.kubernetes.io/managed-by!=Helm --field-selector metadata.name=redis-configmap --ignore-not-found || true
kubectl delete pvc -n kubesphere-system -l app.kubernetes.io/managed-by!=Helm --field-selector metadata.name=redis-pvc --ignore-not-found || true
kubectl delete deploy -n kubesphere-system --all --ignore-not-found
kubectl delete svc -n kubesphere-system --all --ignore-not-found
kubectl delete cm -n kubesphere-system --all --ignore-not-found
kubectl delete secret -n kubesphere-system --all --ignore-not-found
kubectl delete sa -n kubesphere-system --all --ignore-not-found

# Delete KubeSphere CRDs (and any remaining custom resources)
CRD_NAMES=globalrolebindings.iam.kubesphere.io,globalroles.iam.kubesphere.io,users.iam.kubesphere.io,workspacerolebindings.iam.kubesphere.io,workspaceroles.iam.kubesphere.io,workspaces.tenant.kubesphere.io,workspacetemplates.tenant.kubesphere.io,gateways.gateway.kubesphere.io,loginrecords.iam.kubesphere.io,rolebases.iam.kubesphere.io,clusterconfigurations.installer.kubesphere.io,ippools.network.kubesphere.io,helmapplicationversions.application.kubesphere.io,clusterrulegroups.alerting.kubesphere.io,ipamblocks.network.kubesphere.io,helmcategories.application.kubesphere.io,strategies.servicemesh.kubesphere.io,federatedrolebindings.iam.kubesphere.io,nginxes.gateway.kubesphere.io,helmrepos.application.kubesphere.io,helmreleases.application.kubesphere.io,helmapplications.application.kubesphere.io,resourcequotas.quota.kubesphere.io,accessors.storage.kubesphere.io,groups.iam.kubesphere.io,clusters.cluster.kubesphere.io,servicepolicies.servicemesh.kubesphere.io,namespacenetworkpolicies.network.kubesphere.io,federatedusers.iam.kubesphere.io,federatedroles.iam.kubesphere.io,rulegroups.alerting.kubesphere.io,globalrulegroups.alerting.kubesphere.io,ipamhandles.network.kubesphere.io,groupbindings.iam.kubesphere.io,notificationmanagers.notification.kubesphere.io,routers.notification.kubesphere.io,silences.notification.kubesphere.io,configs.notification.kubesphere.io,receivers.notification.kubesphere.io,clusterpropagatedversions.core.kubefed.io,federatedservicestatuses.core.kubefed.io,federatedtypeconfigs.core.kubefed.io,kubefedclusters.core.kubefed.io,kubefedconfigs.core.kubefed.io,propagatedversions.core.kubefed.io,replicaschedulingpreferences.scheduling.kubefed.io,federateddeployments.types.kubefed.io,federatedserviceaccounts.types.kubefed.io,federatednotificationreceivers.types.kubefed.io,federatedapplications.types.kubefed.io,federatedglobalroles.types.kubefed.io,federatedusers.types.kubefed.io,federatedworkspaces.types.kubefed.io,federatedclusterrolebindings.types.kubefed.io,federatedgroupbindings.types.kubefed.io,federatednamespaces.types.kubefed.io,federatedworkspacerolebindings.types.kubefed.io,federatedingresses.types.kubefed.io,federatedworkspaceroles.types.kubefed.io,federatedglobalrolebindings.types.kubefed.io,federatedstatefulsets.types.kubefed.io,federatedsecrets.types.kubefed.io,federatedconfigmaps.types.kubefed.io,federatednotificationconfigs.types.kubefed.io,federatednotificationmanagers.types.kubefed.io,federatednotificationrouters.types.kubefed.io,federatedclusterroles.types.kubefed.io,federatedreplicasets.types.kubefed.io,federatednotificationsilences.types.kubefed.io,federatedgroups.types.kubefed.io,federatedpersistentvolumeclaims.types.kubefed.io,federatedjobs.types.kubefed.io,federatedservices.types.kubefed.io,federatedlimitranges.types.kubefed.io,labels.cluster.kubesphere.io,provisionercapabilities.storage.kubesphere.io,storageclasscapabilities.storage.kubesphere.io

for crd in `kubectl get crds -o jsonpath="{.items[*].metadata.name}"`
do
  if [[ ${CRD_NAMES[@]/${crd}/} != ${CRD_NAMES[@]} ]]; then
     # Clear finalizers on every remaining instance, delete the instances, then delete the CRD itself
     scope=$(kubectl get crd ${crd} -o jsonpath="{.spec.scope}")
     if [[ $scope =~ "Namespaced" ]]; then
        kubectl get $crd -A --no-headers | awk '{print $1" "$2" ""'$crd'"}' | xargs -n 3 sh -c 'kubectl patch $2 -n $0 $1 -p "{\"metadata\":{\"finalizers\":null}}" --type=merge 2>/dev/null && kubectl delete $2 -n $0 $1 2>/dev/null'
     else
        kubectl get $crd -A --no-headers | awk '{print $1" ""'$crd'"}' | xargs -n 2 sh -c 'kubectl patch $1 $0 -p "{\"metadata\":{\"finalizers\":null}}" --type=merge 2>/dev/null && kubectl delete $1 $0 2>/dev/null'
     fi
     kubectl delete crd $crd 2>/dev/null
  fi
done


## Uninstall monitoring components
# Delete Prometheus/Alertmanager/ThanosRuler
kubectl -n kubesphere-monitoring-system delete Prometheus  k8s --ignore-not-found
kubectl -n kubesphere-monitoring-system delete secret additional-scrape-configs --ignore-not-found
kubectl -n kubesphere-monitoring-system delete serviceaccount prometheus-k8s --ignore-not-found
kubectl -n kubesphere-monitoring-system delete service prometheus-k8s --ignore-not-found
kubectl -n kubesphere-monitoring-system delete role prometheus-k8s-config --ignore-not-found
kubectl -n kubesphere-monitoring-system delete rolebinding prometheus-k8s-config --ignore-not-found

kubectl -n kubesphere-monitoring-system delete Alertmanager main --ignore-not-found
kubectl -n kubesphere-monitoring-system delete secret alertmanager-main --ignore-not-found
kubectl -n kubesphere-monitoring-system delete service alertmanager-main --ignore-not-found

kubectl -n kubesphere-monitoring-system delete ThanosRuler kubesphere --ignore-not-found

# Delete ServiceMonitors/PrometheusRules
kubectl -n kubesphere-monitoring-system delete ServiceMonitor alertmanager coredns etcd ks-apiserver  kube-apiserver kube-controller-manager kube-proxy kube-scheduler kube-state-metrics kubelet node-exporter  prometheus prometheus-operator  s2i-operator  thanosruler --ignore-not-found
kubectl -n kubesphere-monitoring-system delete PrometheusRule kubesphere-rules prometheus-k8s-coredns-rules prometheus-k8s-etcd-rules prometheus-k8s-rules --ignore-not-found

# Delete prometheus-operator
kubectl -n kubesphere-monitoring-system delete deployment prometheus-operator --ignore-not-found
kubectl -n kubesphere-monitoring-system delete service  prometheus-operator --ignore-not-found
kubectl -n kubesphere-monitoring-system delete serviceaccount prometheus-operator --ignore-not-found

# Delete kube-state-metrics/node-exporter
kubectl -n kubesphere-monitoring-system delete deployment kube-state-metrics --ignore-not-found
kubectl -n kubesphere-monitoring-system delete service  kube-state-metrics --ignore-not-found
kubectl -n kubesphere-monitoring-system delete serviceaccount  kube-state-metrics --ignore-not-found

kubectl -n kubesphere-monitoring-system delete daemonset node-exporter --ignore-not-found
kubectl -n kubesphere-monitoring-system delete service node-exporter --ignore-not-found
kubectl -n kubesphere-monitoring-system delete serviceaccount node-exporter --ignore-not-found

# Delete ClusterRoles/ClusterRoleBindings
kubectl delete clusterrole kubesphere-prometheus-k8s kubesphere-kube-state-metrics kubesphere-node-exporter kubesphere-prometheus-operator
kubectl delete clusterrolebinding kubesphere-prometheus-k8s kubesphere-kube-state-metrics kubesphere-node-exporter kubesphere-prometheus-operator

# Delete notification-manager
helm delete notification-manager -n kubesphere-monitoring-system

# Clean up remaining deployments in kubesphere-monitoring-system
kubectl delete deploy -n kubesphere-monitoring-system --all --ignore-not-found

# Delete monitoring CRDs
kubectl delete crd alertmanagerconfigs.monitoring.coreos.com
kubectl delete crd alertmanagers.monitoring.coreos.com
kubectl delete crd podmonitors.monitoring.coreos.com
kubectl delete crd probes.monitoring.coreos.com
kubectl delete crd prometheusagents.monitoring.coreos.com
kubectl delete crd prometheuses.monitoring.coreos.com
kubectl delete crd prometheusrules.monitoring.coreos.com
kubectl delete crd scrapeconfigs.monitoring.coreos.com
kubectl delete crd servicemonitors.monitoring.coreos.com
kubectl delete crd thanosrulers.monitoring.coreos.com
kubectl delete crd clusterdashboards.monitoring.kubesphere.io
kubectl delete crd dashboards.monitoring.kubesphere.io

# Delete metrics-server
kubectl delete apiservice v1beta1.metrics.k8s.io
kubectl -n kube-system delete deploy metrics-server
kubectl -n kube-system delete service metrics-server
kubectl delete ClusterRoleBinding system:metrics-server
kubectl delete ClusterRoleBinding metrics-server:system:auth-delegator
kubectl -n kube-system delete RoleBinding  metrics-server-auth-reader
kubectl delete ClusterRole system:metrics-server
kubectl delete ClusterRole system:aggregated-metrics-reader
kubectl -n kube-system delete ServiceAccount metrics-server

## Uninstall logging components
# Delete fluent-bit
kubectl -n kubesphere-logging-system  delete fluentbitconfigs fluent-bit-config --ignore-not-found
kubectl -n kubesphere-logging-system patch fluentbit fluent-bit -p '{"metadata":{"finalizers":null}}' --type=merge
kubectl -n kubesphere-logging-system delete fluentbit fluent-bit --ignore-not-found

# Delete ks-logging (logsidecar-injector)
helm del -n kubesphere-logging-system logsidecar-injector &> /dev/null || true

# Delete ks-events
helm del -n kubesphere-logging-system ks-events &> /dev/null || true

# Delete kube-auditing
helm del -n kubesphere-logging-system kube-auditing &> /dev/null || true

# Delete Elasticsearch
helm del -n kubesphere-logging-system elasticsearch-logging &> /dev/null || true
helm del -n kubesphere-logging-system elasticsearch-logging-curator &> /dev/null || true

# Clean up remaining deployments in kubesphere-logging-system
kubectl delete deploy -n kubesphere-logging-system --all --ignore-not-found

Create a script file named 3_install_v4.sh:

#!/bin/bash
# Install KubeSphere v4 Core (ks-core) via Helm
helm upgrade --install -n kubesphere-system --create-namespace ks-core https://charts.kubesphere.io/main/ks-core-1.1.3.tgz --debug --wait
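
Before running the restore, it helps to confirm the v4 core workloads are ready; a quick check, assuming the default deployment names in kubesphere-system:

kubectl -n kubesphere-system rollout status deploy/ks-apiserver
kubectl -n kubesphere-system rollout status deploy/ks-controller-manager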

Create a script file named 4_restore.sh:

#!/bin/bash
# Re-apply all backups, stripping server-generated metadata so kubectl apply can recreate the objects
find . -type f -name "backup-*.json" | xargs -n 1 cat | jq 'del(.items[].metadata.uid) | del(.items[].metadata.resourceVersion) | del(.items[].metadata.ownerReferences)' | kubectl apply -f -

Run the scripts in order:

chmod +x *.sh
./1_backup.sh
./2_uninstall_v3.sh
./3_install_v4.sh
./4_restore.sh
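
Two optional spot checks: after 2_uninstall_v3.sh the KubeSphere namespaces should be empty of pods, and after 4_restore.sh the backed-up objects should exist again. A sketch, assuming the default namespaces:

# After 2_uninstall_v3.sh: expect no pods left in these namespaces
for ns in kubesphere-system kubesphere-monitoring-system kubesphere-logging-system; do
  echo "== $ns ==" && kubectl get pods -n "$ns" 2>/dev/null
done

# After 4_restore.sh: users and (on the host cluster) workspace objects should be back
kubectl get users.iam.kubesphere.io
kubectl get workspacetemplates.tenant.kubesphere.io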

Install the Gateway

Log in to the console and open the extension marketplace in the top-left corner. Find KubeSphere Gateway and click Install. Once the installation completes, go to Cluster Management -> Cluster Settings -> Gateway Settings (if the entry does not appear, wait a moment and refresh a few times), then enable the cluster gateway. Use Manage -> Edit YAML on the right to apply your configuration; this is where you can re-apply the gateway settings backed up earlier.
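
To confirm the gateway actually came up after enabling it, check its workload pods and the generated IngressClass; a sketch, assuming gateway workloads land in the kubesphere-controls-system namespace (verify against your installation):

kubectl -n kubesphere-controls-system get pods
kubectl get ingressclass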

Also record the IngressClassName shown on the page, e.g. kubesphere-router-cluster.
Then add the following configuration to every application route (Ingress):

kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: console
  namespace: kubesphere-system
spec:
  ingressClassName: kubesphere-router-cluster  # add this line
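
If you have many application routes, patching them in bulk can save time; a sketch in the same xargs style used above, assuming the class name kubesphere-router-cluster and that none of your Ingresses already set an ingressClassName you want to keep:

kubectl get ingress -A --no-headers | awk '{print $1" "$2}' | \
  xargs -n 2 sh -c 'kubectl -n "$0" patch ingress "$1" --type=merge -p "{\"spec\":{\"ingressClassName\":\"kubesphere-router-cluster\"}}"'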

At this point, your application routes should be reachable again.

Configure Cluster Visibility

v4 enables multi-cluster mode by default, so after the upgrade existing workspaces cannot see the cluster.

You need to grant workspaces access under Cluster Management -> Cluster Settings -> Cluster Visibility.
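
The same grant can be made from the CLI by adding the cluster to a workspace template's placement list; a sketch, assuming a workspace named demo-workspace and a cluster named host (verify the field path against your WorkspaceTemplate objects before applying):

kubectl patch workspacetemplates.tenant.kubesphere.io demo-workspace --type=merge \
  -p '{"spec":{"placement":{"clusters":[{"name":"host"}]}}}'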