From d82b780500b100bf1f297d9e40a7a19aeeb9a71e Mon Sep 17 00:00:00 2001 From: rambohe Date: Mon, 10 Jun 2024 19:03:02 +0800 Subject: [PATCH] use helm charts to install yurt-manager and yurthub components in local kind cluster instead of using yaml constants. (#2074) --- Makefile | 30 +- .../yurt-manager/templates/yurt-manager.yaml | 1 - hack/make-rules/local-up-openyurt.sh | 6 +- hack/make-rules/run-e2e-tests.sh | 44 --- pkg/node-servant/components/yurthub.go | 20 +- pkg/node-servant/constant.go | 2 +- pkg/node-servant/convert/convert.go | 12 +- pkg/node-servant/convert/options.go | 17 - pkg/node-servant/job.go | 2 +- pkg/node-servant/revert/revert.go | 4 +- test/e2e/cmd/init/constants/constants.go | 333 ------------------ test/e2e/cmd/init/converter.go | 261 ++++---------- test/e2e/cmd/init/converter_test.go | 82 ----- test/e2e/cmd/init/init.go | 234 ++++++++++-- test/e2e/cmd/init/init_test.go | 5 - test/e2e/cmd/init/kindoperator_test.go | 8 +- .../cmd/init/util/kubernetes/apply_addons.go | 100 ------ .../init/util/kubernetes/apply_addons_test.go | 73 ---- test/e2e/cmd/init/util/kubernetes/util.go | 256 +++----------- .../e2e/cmd/init/util/kubernetes/util_test.go | 298 +--------------- 20 files changed, 347 insertions(+), 1441 deletions(-) delete mode 100644 test/e2e/cmd/init/util/kubernetes/apply_addons_test.go diff --git a/Makefile b/Makefile index 9142e98c25d..d8e608edb1f 100644 --- a/Makefile +++ b/Makefile @@ -26,6 +26,13 @@ CRD_OPTIONS ?= "crd:crdVersions=v1,maxDescLen=1000" BUILD_KUSTOMIZE ?= _output/manifest GOPROXY ?= $(shell go env GOPROXY) +# Dynamic detection of operating system and architecture +OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') +ARCH := $(shell uname -m) +ifeq ($(ARCH),x86_64) + ARCH := amd64 +endif + ifeq ($(shell git tag --points-at ${GIT_COMMIT}),) GIT_VERSION=$(IMAGE_TAG)-$(shell echo ${GIT_COMMIT} | cut -c 1-7) else @@ -65,6 +72,10 @@ KUBECTL ?= $(LOCALBIN)/kubectl YQ_VERSION := 4.13.2 YQ := $(shell command -v $(LOCALBIN)/yq 2> /dev/null) +HELM_VERSION ?= v3.9.3 +HELM ?= $(LOCALBIN)/helm +HELM_BINARY_URL := https://get.helm.sh/helm-$(HELM_VERSION)-$(OS)-$(ARCH).tar.gz + .PHONY: clean all build test all: test build @@ -96,7 +107,7 @@ verify-mod: hack/make-rules/verify_mod.sh # Start up OpenYurt cluster on local machine based on a Kind cluster -local-up-openyurt: +local-up-openyurt: install-helm KUBERNETESVERSION=${KUBERNETESVERSION} YURT_VERSION=$(GIT_VERSION) bash hack/make-rules/local-up-openyurt.sh # Build all OpenYurt components images and then start up OpenYurt cluster on local machine based on a Kind cluster @@ -113,6 +124,21 @@ docker-build-and-up-openyurt: docker-build e2e-tests: ENABLE_AUTONOMY_TESTS=${ENABLE_AUTONOMY_TESTS} TARGET_PLATFORMS=${TARGET_PLATFORMS} hack/make-rules/run-e2e-tests.sh + +install-helm: $(LOCALBIN) + @echo "Checking Helm installation..." 
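# A rough illustration of what the install-helm recipe below does: with the defaults
# declared above (HELM_VERSION ?= v3.9.3) on a linux/amd64 host, HELM_BINARY_URL
# resolves to https://get.helm.sh/helm-v3.9.3-linux-amd64.tar.gz, so the download
# step is roughly equivalent to running:
#   curl -fsSL -o helm.tar.gz https://get.helm.sh/helm-v3.9.3-linux-amd64.tar.gz
#   tar -xzf helm.tar.gz
#   mv linux-amd64/helm $(LOCALBIN)/helm
#   rm -rf linux-amd64 helm.tar.gz
# The version check that follows only triggers this download when $(LOCALBIN)/helm
# is missing or does not already report $(HELM_VERSION).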
+ @HELM_CURRENT_VERSION=$$($(HELM) version --template="{{ .Version }}" 2>/dev/null || echo ""); \ + if [ "$$HELM_CURRENT_VERSION" != "$(HELM_VERSION)" ]; then \ + echo "Installing or upgrading Helm to version $(HELM_VERSION) into $(LOCALBIN)"; \ + curl -fsSL -o helm.tar.gz "$(HELM_BINARY_URL)"; \ + tar -xzf helm.tar.gz; \ + mv $(OS)-$(ARCH)/helm $(HELM); \ + rm -rf $(OS)-$(ARCH); \ + rm helm.tar.gz; \ + else \ + echo "Helm version $(HELM_VERSION) is already installed."; \ + fi + install-golint: ## check golint if not exist install golint tools ifeq ($(shell $(GLOBAL_GOLANGCILINT) version --format short), $(GOLANGCILINT_VERSION)) GOLINT_BIN=$(GLOBAL_GOLANGCILINT) @@ -222,7 +248,7 @@ kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong ver $(KUSTOMIZE): $(LOCALBIN) @if test -x $(LOCALBIN)/kustomize && ! $(LOCALBIN)/kustomize version | grep -q $(KUSTOMIZE_VERSION); then \ echo "$(LOCALBIN)/kustomize version is not expected $(KUSTOMIZE_VERSION). Removing it before installing."; \ - rm -rf $(LOCALBIN)/kustomize; \ + rm -f $(LOCALBIN)/kustomize; \ fi test -s $(LOCALBIN)/kustomize || { curl -Ss $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); } diff --git a/charts/yurt-manager/templates/yurt-manager.yaml b/charts/yurt-manager/templates/yurt-manager.yaml index 0f9b3e573a6..9f72f3b1551 100644 --- a/charts/yurt-manager/templates/yurt-manager.yaml +++ b/charts/yurt-manager/templates/yurt-manager.yaml @@ -90,7 +90,6 @@ spec: - --metrics-addr=:{{ .Values.ports.metrics }} - --health-probe-addr=:{{ .Values.ports.healthProbe }} - --webhook-port={{ .Values.ports.webhook }} - - --logtostderr=true - --v={{ .Values.log.level }} - --working-namespace={{ .Release.Namespace }} {{- if .Values.leaderElectResourceName }} diff --git a/hack/make-rules/local-up-openyurt.sh b/hack/make-rules/local-up-openyurt.sh index dfc7e98b081..fa315b30289 100755 --- a/hack/make-rules/local-up-openyurt.sh +++ b/hack/make-rules/local-up-openyurt.sh @@ -66,10 +66,6 @@ readonly KUBERNETESVERSION=${KUBERNETESVERSION:-"v1.28"} readonly NODES_NUM=${NODES_NUM:-3} readonly KIND_KUBECONFIG=${KIND_KUBECONFIG:-${HOME}/.kube/config} readonly DISABLE_DEFAULT_CNI=${DISABLE_DEFAULT_CNI:-"false"} -ENABLE_DUMMY_IF=true -if [[ "${LOCAL_OS}" == darwin ]]; then - ENABLE_DUMMY_IF=false -fi function install_kind { echo "Begin to install kind" @@ -134,7 +130,7 @@ function local_up_openyurt { $YURT_ROOT/test/e2e/e2e.test init \ --kubernetes-version=${KUBERNETESVERSION} --kube-config=${KIND_KUBECONFIG} \ --cluster-name=${CLUSTER_NAME} --openyurt-version=${YURT_VERSION} --use-local-images --ignore-error \ - --node-num=${NODES_NUM} --enable-dummy-if=${ENABLE_DUMMY_IF} --disable-default-cni=${DISABLE_DEFAULT_CNI} + --node-num=${NODES_NUM} --disable-default-cni=${DISABLE_DEFAULT_CNI} } function cleanup { diff --git a/hack/make-rules/run-e2e-tests.sh b/hack/make-rules/run-e2e-tests.sh index defac21fcac..2864eedd16e 100755 --- a/hack/make-rules/run-e2e-tests.sh +++ b/hack/make-rules/run-e2e-tests.sh @@ -31,8 +31,6 @@ edgeNodeContainer2Name="openyurt-e2e-test-worker2" KUBECONFIG=${KUBECONFIG:-${HOME}/.kube/config} TARGET_PLATFORM=${TARGET_PLATFORMS:-linux/amd64} ENABLE_AUTONOMY_TESTS=${ENABLE_AUTONOMY_TESTS:-true} -USE_LOCAL_CNI=${USE_LOCAL_CNI:-false} -SKIP_SETUP_NETWORK=${SKIP_SETUP_NETWORK:-false} function set_flags() { goldflags="${GOLDFLAGS:--s -w $(project_info)}" @@ -45,44 +43,6 @@ function set_flags() { docker cp $KUBECONFIG $edgeNodeContainerName:/root/.kube/config } -# set up network -function 
set_up_network() { - # set up bridge cni plugins for every node - if [ "$USE_LOCAL_CNI" = "true" ]; then - if [ "$TARGET_PLATFORM" = "linux/amd64" ]; then - cp ${YURT_ROOT}/hack/cni/cni-plugins-linux-amd64-v1.4.1.tgz /tmp/cni.tgz - else - cp ${YURT_ROOT}/hack/cni/cni-plugins-linux-arm64-v1.4.1.tgz /tmp/cni.tgz - fi - else - if [ "$TARGET_PLATFORM" = "linux/amd64" ]; then - wget -O /tmp/cni.tgz https://github.com/containernetworking/plugins/releases/download/v1.4.1/cni-plugins-linux-amd64-v1.4.1.tgz - else - wget -O /tmp/cni.tgz https://github.com/containernetworking/plugins/releases/download/v1.4.1/cni-plugins-linux-arm64-v1.4.1.tgz - fi - fi - - - docker cp /tmp/cni.tgz $cloudNodeContainerName:/opt/cni/bin/ - docker exec -t $cloudNodeContainerName /bin/bash -c 'cd /opt/cni/bin && tar -zxf cni.tgz' - - docker cp /tmp/cni.tgz $edgeNodeContainerName:/opt/cni/bin/ - docker exec -t $edgeNodeContainerName /bin/bash -c 'cd /opt/cni/bin && tar -zxf cni.tgz' - - docker cp /tmp/cni.tgz $edgeNodeContainer2Name:/opt/cni/bin/ - docker exec -t $edgeNodeContainer2Name /bin/bash -c 'cd /opt/cni/bin && tar -zxf cni.tgz' - - # deploy flannel DaemonSet - local flannelYaml="https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml" - local flannelDs="kube-flannel-ds" - local flannelNameSpace="kube-flannel" - local POD_CREATE_TIMEOUT=120s - curl -o /tmp/flannel.yaml $flannelYaml - kubectl apply -f /tmp/flannel.yaml - # check if flannel on every node is ready, if so, "daemon set "kube-flannel-ds" successfully rolled out" - kubectl rollout status daemonset kube-flannel-ds -n kube-flannel --timeout=${POD_CREATE_TIMEOUT} -} - function cleanup { rm -rf "$YURT_ROOT/test/e2e/e2e.test" } @@ -166,10 +126,6 @@ function prepare_autonomy_tests { GOOS=${LOCAL_OS} GOARCH=${LOCAL_ARCH} set_flags -if [ "$SKIP_SETUP_NETWORK" != "true" ]; then - set_up_network -fi - cleanup get_ginkgo diff --git a/pkg/node-servant/components/yurthub.go b/pkg/node-servant/components/yurthub.go index a4baa538ee5..5c35504115e 100644 --- a/pkg/node-servant/components/yurthub.go +++ b/pkg/node-servant/components/yurthub.go @@ -34,11 +34,9 @@ import ( "github.com/openyurtio/openyurt/pkg/projectinfo" kubeconfigutil "github.com/openyurtio/openyurt/pkg/util/kubeconfig" - tmplutil "github.com/openyurtio/openyurt/pkg/util/templates" "github.com/openyurtio/openyurt/pkg/yurtadm/constants" enutil "github.com/openyurtio/openyurt/pkg/yurtadm/util/edgenode" "github.com/openyurtio/openyurt/pkg/yurthub/storage/disk" - "github.com/openyurtio/openyurt/pkg/yurthub/util" ) const ( @@ -52,25 +50,16 @@ const ( type yurtHubOperator struct { apiServerAddr string - yurthubImage string joinToken string - workingMode util.WorkingMode yurthubHealthCheckTimeout time.Duration - enableDummyIf bool - enableNodePool bool } // NewYurthubOperator new yurtHubOperator struct -func NewYurthubOperator(apiServerAddr string, yurthubImage string, joinToken string, - workingMode util.WorkingMode, yurthubHealthCheckTimeout time.Duration, enableDummyIf, enableNodePool bool) *yurtHubOperator { +func NewYurthubOperator(apiServerAddr string, joinToken string, yurthubHealthCheckTimeout time.Duration) *yurtHubOperator { return &yurtHubOperator{ apiServerAddr: apiServerAddr, - yurthubImage: yurthubImage, joinToken: joinToken, - workingMode: workingMode, yurthubHealthCheckTimeout: yurthubHealthCheckTimeout, - enableDummyIf: enableDummyIf, - enableNodePool: enableNodePool, } } @@ -101,12 +90,7 @@ func (op *yurtHubOperator) Install() error { return fmt.Errorf("could 
not read source file %s: %w", configMapDataPath, err) } klog.Infof("yurt-hub.yaml apiServerAddr: %+v", op.apiServerAddr) - yssYurtHub, err := tmplutil.SubsituteTemplate(string(content), map[string]string{ - "kubernetesServerAddr": op.apiServerAddr, - }) - if err != nil { - return err - } + yssYurtHub := strings.ReplaceAll(string(content), "KUBERNETES_SERVER_ADDRESS", op.apiServerAddr) if err = os.WriteFile(getYurthubYaml(podManifestPath), []byte(yssYurtHub), fileMode); err != nil { return err } diff --git a/pkg/node-servant/constant.go b/pkg/node-servant/constant.go index f71cf62dd92..67644c71f1b 100644 --- a/pkg/node-servant/constant.go +++ b/pkg/node-servant/constant.go @@ -54,7 +54,7 @@ spec: - /bin/sh - -c args: - - "/usr/local/bin/entry.sh convert --working-mode={{.working_mode}} --yurthub-image={{.yurthub_image}} {{if .yurthub_healthcheck_timeout}}--yurthub-healthcheck-timeout={{.yurthub_healthcheck_timeout}} {{end}}--join-token={{.joinToken}} {{if .enable_dummy_if}}--enable-dummy-if={{.enable_dummy_if}}{{end}} {{if .enable_node_pool}}--enable-node-pool={{.enable_node_pool}}{{end}}" + - "/usr/local/bin/entry.sh convert {{if .yurthub_healthcheck_timeout}}--yurthub-healthcheck-timeout={{.yurthub_healthcheck_timeout}} {{end}}--join-token={{.joinToken}}" securityContext: privileged: true volumeMounts: diff --git a/pkg/node-servant/convert/convert.go b/pkg/node-servant/convert/convert.go index 4abd111a06b..1894e99ef15 100644 --- a/pkg/node-servant/convert/convert.go +++ b/pkg/node-servant/convert/convert.go @@ -22,19 +22,14 @@ import ( "time" "github.com/openyurtio/openyurt/pkg/node-servant/components" - "github.com/openyurtio/openyurt/pkg/yurthub/util" ) // Config has the information that required by convert operation type Config struct { - yurthubImage string yurthubHealthCheckTimeout time.Duration - workingMode util.WorkingMode joinToken string kubeadmConfPaths []string openyurtDir string - enableDummyIf bool - enableNodePool bool } // nodeConverter do the convert job @@ -46,14 +41,10 @@ type nodeConverter struct { func NewConverterWithOptions(o *Options) *nodeConverter { return &nodeConverter{ Config: Config{ - yurthubImage: o.yurthubImage, yurthubHealthCheckTimeout: o.yurthubHealthCheckTimeout, - workingMode: util.WorkingMode(o.workingMode), joinToken: o.joinToken, kubeadmConfPaths: strings.Split(o.kubeadmConfPaths, ","), openyurtDir: o.openyurtDir, - enableDummyIf: o.enableDummyIf, - enableNodePool: o.enableNodePool, }, } } @@ -79,8 +70,7 @@ func (n *nodeConverter) installYurtHub() error { if apiServerAddress == "" { return fmt.Errorf("get apiServerAddress empty") } - op := components.NewYurthubOperator(apiServerAddress, n.yurthubImage, n.joinToken, - n.workingMode, n.yurthubHealthCheckTimeout, n.enableDummyIf, n.enableNodePool) + op := components.NewYurthubOperator(apiServerAddress, n.joinToken, n.yurthubHealthCheckTimeout) return op.Install() } diff --git a/pkg/node-servant/convert/options.go b/pkg/node-servant/convert/options.go index 67ce7de3e5d..880d6a5dc66 100644 --- a/pkg/node-servant/convert/options.go +++ b/pkg/node-servant/convert/options.go @@ -25,7 +25,6 @@ import ( "github.com/openyurtio/openyurt/pkg/node-servant/components" "github.com/openyurtio/openyurt/pkg/yurtadm/constants" - hubutil "github.com/openyurtio/openyurt/pkg/yurthub/util" ) const ( @@ -35,27 +34,19 @@ const ( // Options has the information that required by convert operation type Options struct { - yurthubImage string yurthubHealthCheckTimeout time.Duration - workingMode string joinToken string 
kubeadmConfPaths string openyurtDir string - enableDummyIf bool - enableNodePool bool Version bool } // NewConvertOptions creates a new Options func NewConvertOptions() *Options { return &Options{ - yurthubImage: "openyurt/yurthub:latest", yurthubHealthCheckTimeout: defaultYurthubHealthCheckTimeout, - workingMode: string(hubutil.WorkingModeEdge), kubeadmConfPaths: strings.Join(components.GetDefaultKubeadmConfPath(), ","), openyurtDir: constants.OpenyurtDir, - enableDummyIf: true, - enableNodePool: true, } } @@ -65,21 +56,13 @@ func (o *Options) Validate() error { return fmt.Errorf("join token(bootstrap token) is empty") } - if !hubutil.IsSupportedWorkingMode(hubutil.WorkingMode(o.workingMode)) { - return fmt.Errorf("workingMode must be pointed out as cloud or edge. got %s", o.workingMode) - } - return nil } // AddFlags sets flags. func (o *Options) AddFlags(fs *pflag.FlagSet) { - fs.StringVar(&o.yurthubImage, "yurthub-image", o.yurthubImage, "The yurthub image.") fs.DurationVar(&o.yurthubHealthCheckTimeout, "yurthub-healthcheck-timeout", o.yurthubHealthCheckTimeout, "The timeout for yurthub health check.") fs.StringVarP(&o.kubeadmConfPaths, "kubeadm-conf-path", "k", o.kubeadmConfPaths, "The path to kubelet service conf that is used by kubelet component to join the cluster on the work node. Support multiple values, will search in order until get the file.(e.g -k kbcfg1,kbcfg2)") fs.StringVar(&o.joinToken, "join-token", o.joinToken, "The token used by yurthub for joining the cluster.") - fs.StringVar(&o.workingMode, "working-mode", o.workingMode, "The node type cloud/edge, effect yurthub workingMode.") - fs.BoolVar(&o.enableDummyIf, "enable-dummy-if", o.enableDummyIf, "Enable dummy interface for yurthub or not.") - fs.BoolVar(&o.enableNodePool, "enable-node-pool", o.enableNodePool, "Enable list/watch nodepools for yurthub or not.") fs.BoolVar(&o.Version, "version", o.Version, "print the version information.") } diff --git a/pkg/node-servant/job.go b/pkg/node-servant/job.go index b7dee4911e1..339a0bfefc2 100644 --- a/pkg/node-servant/job.go +++ b/pkg/node-servant/job.go @@ -84,7 +84,7 @@ func validate(action string, tmplCtx map[string]string, nodeName string) error { switch action { case "convert": - keysMustHave := []string{"node_servant_image", "yurthub_image", "joinToken"} + keysMustHave := []string{"node_servant_image", "joinToken"} return checkKeys(keysMustHave, tmplCtx) case "revert": keysMustHave := []string{"node_servant_image"} diff --git a/pkg/node-servant/revert/revert.go b/pkg/node-servant/revert/revert.go index 5d5edf21397..8567f539d5f 100644 --- a/pkg/node-servant/revert/revert.go +++ b/pkg/node-servant/revert/revert.go @@ -20,7 +20,6 @@ import ( "time" "github.com/openyurtio/openyurt/pkg/node-servant/components" - "github.com/openyurtio/openyurt/pkg/yurthub/util" ) // NodeReverter do the revert job @@ -61,8 +60,7 @@ func (n *nodeReverter) revertKubelet() error { } func (n *nodeReverter) unInstallYurtHub() error { - op := components.NewYurthubOperator("", "", "", - util.WorkingModeCloud, time.Duration(1), true, true) // params is not important here + op := components.NewYurthubOperator("", "", time.Duration(1)) // params is not important here return op.UnInstall() } diff --git a/test/e2e/cmd/init/constants/constants.go b/test/e2e/cmd/init/constants/constants.go index f90bc38636b..3e73a1168af 100644 --- a/test/e2e/cmd/init/constants/constants.go +++ b/test/e2e/cmd/init/constants/constants.go @@ -16,343 +16,10 @@ limitations under the License. 
package constants -import "github.com/openyurtio/openyurt/pkg/projectinfo" - -var ( - // AnnotationAutonomy is used to identify if a node is autonomous - AnnotationAutonomy = projectinfo.GetAutonomyAnnotation() -) - const ( YurtctlLockConfigMapName = "yurtctl-lock" DefaultOpenYurtVersion = "latest" TmpDownloadDir = "/tmp" DirMode = 0755 FileMode = 0666 - - YurthubComponentName = "yurt-hub" - YurthubNamespace = "kube-system" - YurthubCmName = "yurt-hub-cfg" - - YurtManagerCertsSecret = ` -apiVersion: v1 -kind: Secret -metadata: - name: yurt-manager-webhook-certs - namespace: kube-system -` - - YurtManagerServiceAccount = ` -apiVersion: v1 -kind: ServiceAccount -metadata: - name: yurt-manager - namespace: kube-system -` - - YurtManagerClusterRoleBinding = ` -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: yurt-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: yurt-manager-role -subjects: -- kind: ServiceAccount - name: yurt-manager - namespace: kube-system -` - - YurtManagerRoleBinding = ` -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: yurt-manager-role-binding - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: yurt-manager-role -subjects: -- kind: ServiceAccount - name: yurt-manager - namespace: kube-system -` - - YurtManagerService = ` -apiVersion: v1 -kind: Service -metadata: - name: yurt-manager-webhook-service - namespace: kube-system -spec: - ports: - - port: 443 - protocol: TCP - targetPort: 10273 - selector: - app.kubernetes.io/name: yurt-manager -` - // YurtManagerDeployment defines the yurt manager deployment in yaml format - YurtManagerDeployment = ` -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/name: yurt-manager - app.kubernetes.io/version: v1.3.0 - name: yurt-manager - namespace: "kube-system" -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: yurt-manager - template: - metadata: - labels: - app.kubernetes.io/name: yurt-manager - spec: - tolerations: - - operator: "Exists" - hostNetwork: true - containers: - - args: - - --metrics-addr=:10271 - - --health-probe-addr=:10272 - - --webhook-port=10273 - - --controllers=* - - --v=4 - command: - - /usr/local/bin/yurt-manager - image: {{.image}} - imagePullPolicy: IfNotPresent - name: yurt-manager - ports: - - containerPort: 10273 - name: webhook-server - protocol: TCP - - containerPort: 10271 - name: metrics - protocol: TCP - - containerPort: 10272 - name: health - protocol: TCP - livenessProbe: - httpGet: - path: /healthz - port: 10272 - initialDelaySeconds: 60 - timeoutSeconds: 2 - periodSeconds: 10 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /readyz - port: 10272 - initialDelaySeconds: 60 - timeoutSeconds: 2 - periodSeconds: 10 - failureThreshold: 2 - serviceAccountName: yurt-manager - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: {{.edgeNodeLabel}} - operator: In - values: - - "false" -` - - YurthubClusterRole = ` -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: yurt-hub -rules: - - apiGroups: - - "" - resources: - - events - verbs: - - get - - apiGroups: - - apps.openyurt.io - resources: - - nodepools - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch -` - YurthubClusterRoleBinding = ` -apiVersion: rbac.authorization.k8s.io/v1 
-kind: ClusterRoleBinding -metadata: - name: yurt-hub -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: yurt-hub -subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:nodes -` - YurthubConfigMap = ` -apiVersion: v1 -kind: ConfigMap -metadata: - name: yurt-hub-cfg - namespace: kube-system -data: - cache_agents: "" - servicetopology: "" - discardcloudservice: "" - masterservice: "" -` - - YurthubCloudYurtStaticSet = ` -apiVersion: apps.openyurt.io/v1alpha1 -kind: YurtStaticSet -metadata: - name: yurt-hub-cloud - namespace: "kube-system" -spec: - staticPodManifest: yurthub - template: - metadata: - labels: - k8s-app: yurt-hub-cloud - spec: - volumes: - - name: hub-dir - hostPath: - path: /var/lib/yurthub - type: DirectoryOrCreate - - name: kubernetes - hostPath: - path: /etc/kubernetes - type: Directory - containers: - - name: yurt-hub - image: {{.yurthub_image}} - imagePullPolicy: IfNotPresent - volumeMounts: - - name: hub-dir - mountPath: /var/lib/yurthub - - name: kubernetes - mountPath: /etc/kubernetes - command: - - yurthub - - --v=2 - - --bind-address=127.0.0.1 - - --server-addr={{.kubernetesServerAddr}} - - --node-name=$(NODE_NAME) - - --bootstrap-file=/var/lib/yurthub/bootstrap-hub.conf - - --working-mode=cloud - - --namespace="kube-system" - livenessProbe: - httpGet: - host: 127.0.0.1 - path: /v1/healthz - port: 10267 - initialDelaySeconds: 300 - periodSeconds: 5 - failureThreshold: 3 - resources: - requests: - cpu: 150m - memory: 150Mi - limits: - memory: 300Mi - securityContext: - capabilities: - add: [ "NET_ADMIN", "NET_RAW" ] - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - hostNetwork: true - priorityClassName: system-node-critical - priority: 2000001000 -` - YurthubYurtStaticSet = ` -apiVersion: apps.openyurt.io/v1alpha1 -kind: YurtStaticSet -metadata: - name: yurt-hub - namespace: "kube-system" -spec: - staticPodManifest: yurthub - template: - metadata: - labels: - k8s-app: yurt-hub - spec: - volumes: - - name: hub-dir - hostPath: - path: /var/lib/yurthub - type: DirectoryOrCreate - - name: kubernetes - hostPath: - path: /etc/kubernetes - type: Directory - containers: - - name: yurt-hub - image: {{.yurthub_image}} - imagePullPolicy: IfNotPresent - volumeMounts: - - name: hub-dir - mountPath: /var/lib/yurthub - - name: kubernetes - mountPath: /etc/kubernetes - command: - - yurthub - - --v=2 - - --bind-address=127.0.0.1 - - --server-addr={{.kubernetesServerAddr}} - - --node-name=$(NODE_NAME) - - --bootstrap-file=/var/lib/yurthub/bootstrap-hub.conf - - --working-mode=edge - - --namespace="kube-system" - livenessProbe: - httpGet: - host: 127.0.0.1 - path: /v1/healthz - port: 10267 - initialDelaySeconds: 300 - periodSeconds: 5 - failureThreshold: 3 - resources: - requests: - cpu: 150m - memory: 150Mi - limits: - memory: 300Mi - securityContext: - capabilities: - add: [ "NET_ADMIN", "NET_RAW" ] - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - hostNetwork: true - priorityClassName: system-node-critical - priority: 2000001000 -` ) diff --git a/test/e2e/cmd/init/converter.go b/test/e2e/cmd/init/converter.go index 7fd6cdffdf9..4dbafcb8fa2 100644 --- a/test/e2e/cmd/init/converter.go +++ b/test/e2e/cmd/init/converter.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "os" + "os/exec" "path/filepath" "strconv" "strings" @@ -27,24 +28,17 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 
- "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" kubeclientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" bootstrapapi "k8s.io/cluster-bootstrap/token/api" "k8s.io/klog/v2" nodeservant "github.com/openyurtio/openyurt/pkg/node-servant" kubeadmapi "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubeadm/app/phases/bootstraptoken/clusterinfo" strutil "github.com/openyurtio/openyurt/pkg/util/strings" - tmplutil "github.com/openyurtio/openyurt/pkg/util/templates" - "github.com/openyurtio/openyurt/pkg/yurthub/util" - "github.com/openyurtio/openyurt/test/e2e/cmd/init/constants" "github.com/openyurtio/openyurt/test/e2e/cmd/init/lock" kubeutil "github.com/openyurtio/openyurt/test/e2e/cmd/init/util/kubernetes" ) @@ -58,7 +52,6 @@ const ( type ClusterConverter struct { RootDir string - ComponentsBuilder *kubeutil.Builder ClientSet kubeclientset.Interface CloudNodes []string EdgeNodes []string @@ -68,7 +61,6 @@ type ClusterConverter struct { YurtManagerImage string NodeServantImage string YurthubImage string - EnableDummyIf bool } func (c *ClusterConverter) Run() error { @@ -88,7 +80,7 @@ func (c *ClusterConverter) Run() error { } klog.Info("Deploying yurt-manager") - if err := c.deployYurtManager(); err != nil { + if err := c.installYurtManagerByHelm(); err != nil { klog.Errorf("failed to deploy yurt-manager with image %s, %s", c.YurtManagerImage, err) return err } @@ -96,7 +88,7 @@ func (c *ClusterConverter) Run() error { klog.Info("Running jobs for convert. Job running may take a long time, and job failure will not affect the execution of the next stage") klog.Info("Running node-servant-convert jobs to deploy the yurt-hub and reset the kubelet service on edge and cloud nodes") - if err := c.deployYurthub(); err != nil { + if err := c.installYurthubByHelm(); err != nil { klog.Errorf("error occurs when deploying Yurthub, %v", err) return err } @@ -118,12 +110,22 @@ func (c *ClusterConverter) labelEdgeNodes() error { return nil } -func (c *ClusterConverter) deployYurthub() error { - // wait YurtStaticSet is ready for using - if err := waitForCRDReady(c.KubeConfigPath, "yurtstaticsets.apps.openyurt.io"); err != nil { +func (c *ClusterConverter) installYurthubByHelm() error { + helmPath := filepath.Join(c.RootDir, "bin", "helm") + yurthubChartPath := filepath.Join(c.RootDir, "charts", "yurthub") + + parts := strings.Split(c.YurthubImage, "/") + imageTagParts := strings.Split(parts[len(parts)-1], ":") + tag := imageTagParts[1] + + // create the yurthub-cloud and yurthub yss + cmd := exec.Command(helmPath, "install", "yurthub", yurthubChartPath, "--namespace", "kube-system", "--set", fmt.Sprintf("kubernetesServerAddr=KUBERNETES_SERVER_ADDRESS,image.tag=%s", tag)) + output, err := cmd.CombinedOutput() + if err != nil { + klog.Errorf("couldn't install yurthub, %v, %s", err, string(output)) return err } - + klog.Infof("start to install yurthub, %s", string(output)) // deploy yurt-hub and reset the kubelet service on edge nodes. 
joinToken, err := prepareYurthubStart(c.ClientSet, c.KubeConfigPath) if err != nil { @@ -131,99 +133,35 @@ func (c *ClusterConverter) deployYurthub() error { } convertCtx := map[string]string{ "node_servant_image": c.NodeServantImage, - "yurthub_image": c.YurthubImage, "joinToken": joinToken, - // The node-servant will detect the kubeadm_conf_path automatically - // It will be either "/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf" - // or "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf". - "kubeadm_conf_path": "", - "working_mode": string(util.WorkingModeEdge), - "enable_dummy_if": strconv.FormatBool(c.EnableDummyIf), - "kubernetesServerAddr": "{{.kubernetesServerAddr}}", } if c.YurthubHealthCheckTimeout != defaultYurthubHealthCheckTimeout { convertCtx["yurthub_healthcheck_timeout"] = c.YurthubHealthCheckTimeout.String() } - // create the yurthub-cloud and yurthub yss - tempDir, err := os.MkdirTemp(c.RootDir, "yurt-hub") - if err != nil { - return err - } - defer os.RemoveAll(tempDir) - tempFile := filepath.Join(tempDir, "yurthub-cloud-yurtstaticset.yaml") - yssYurtHubCloud, err := tmplutil.SubsituteTemplate(constants.YurthubCloudYurtStaticSet, convertCtx) - if err != nil { - return err - } - if err = os.WriteFile(tempFile, []byte(yssYurtHubCloud), 0644); err != nil { - return err - } - if err = c.ComponentsBuilder.InstallComponents(tempFile, false); err != nil { - return err - } - - tempFile = filepath.Join(tempDir, "yurthub-yurtstaticset.yaml") - yssYurtHub, err := tmplutil.SubsituteTemplate(constants.YurthubYurtStaticSet, convertCtx) - if err != nil { - return err - } - if err = os.WriteFile(tempFile, []byte(yssYurtHub), 0644); err != nil { - return err - } - if err = c.ComponentsBuilder.InstallComponents(tempFile, false); err != nil { - return err - } - - npExist, err := nodePoolResourceExists(c.ClientSet) - if err != nil { - return err - } - convertCtx["enable_node_pool"] = strconv.FormatBool(npExist) - klog.Infof("convert context for edge nodes(%q): %#+v", c.EdgeNodes, convertCtx) - if len(c.EdgeNodes) != 0 { - convertCtx["working_mode"] = string(util.WorkingModeEdge) convertCtx["configmap_name"] = yssYurtHubName if err = kubeutil.RunServantJobs(c.ClientSet, c.WaitServantJobTimeout, func(nodeName string) (*batchv1.Job, error) { return nodeservant.RenderNodeServantJob("convert", convertCtx, nodeName) - }, c.EdgeNodes, os.Stderr, false); err != nil { + }, c.EdgeNodes, os.Stderr); err != nil { // print logs of yurthub for i := range c.EdgeNodes { hubPodName := fmt.Sprintf("yurt-hub-%s", c.EdgeNodes[i]) pod, logErr := c.ClientSet.CoreV1().Pods("kube-system").Get(context.TODO(), hubPodName, metav1.GetOptions{}) if logErr == nil { - kubeutil.PrintPodLog(c.ClientSet, pod, os.Stderr) + kubeutil.DumpPod(c.ClientSet, pod, os.Stderr) } } - - // print logs of yurt-manager - podList, logErr := c.ClientSet.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{ - LabelSelector: labels.SelectorFromSet(map[string]string{"app.kubernetes.io/name": "yurt-manager"}).String(), - }) - if logErr != nil { - klog.Errorf("failed to get yurt-manager pod, %v", logErr) - return err - } - - if len(podList.Items) == 0 { - klog.Errorf("yurt-manager pod doesn't exist") - return err - } - if logErr = kubeutil.PrintPodLog(c.ClientSet, &podList.Items[0], os.Stderr); logErr != nil { - return err - } return err } } // deploy yurt-hub and reset the kubelet service on cloud nodes - convertCtx["working_mode"] = string(util.WorkingModeCloud) convertCtx["configmap_name"] = yssYurtHubCloudName 
klog.Infof("convert context for cloud nodes(%q): %#+v", c.CloudNodes, convertCtx) if err = kubeutil.RunServantJobs(c.ClientSet, c.WaitServantJobTimeout, func(nodeName string) (*batchv1.Job, error) { return nodeservant.RenderNodeServantJob("convert", convertCtx, nodeName) - }, c.CloudNodes, os.Stderr, false); err != nil { + }, c.CloudNodes, os.Stderr); err != nil { return err } @@ -239,11 +177,6 @@ func prepareYurthubStart(cliSet kubeclientset.Interface, kcfg string) (string, e return "", err } - // prepare global settings(like RBAC, configmap) for yurthub - if err := kubeutil.DeployYurthubSetting(cliSet); err != nil { - return "", err - } - // prepare join-token for yurthub joinToken, err := kubeutil.GetOrCreateJoinTokenString(cliSet) if err != nil || joinToken == "" { @@ -271,53 +204,24 @@ func prepareClusterInfoConfigMap(client kubeclientset.Interface, file string) er return nil } -func nodePoolResourceExists(client kubeclientset.Interface) (bool, error) { - groupVersion := schema.GroupVersion{ - Group: "apps.openyurt.io", - Version: "v1alpha1", - } - apiResourceList, err := client.Discovery().ServerResourcesForGroupVersion(groupVersion.String()) - if err != nil && !apierrors.IsNotFound(err) { - klog.Errorf("failed to discover nodepool resource, %v", err) - return false, err - } else if apiResourceList == nil { - return false, nil - } +func (c *ClusterConverter) installYurtManagerByHelm() error { + helmPath := filepath.Join(c.RootDir, "bin", "helm") + yurtManagerChartPath := filepath.Join(c.RootDir, "charts", "yurt-manager") - for i := range apiResourceList.APIResources { - if apiResourceList.APIResources[i].Name == "nodepools" && apiResourceList.APIResources[i].Kind == "NodePool" { - return true, nil - } - } - return false, nil -} - -func (c *ClusterConverter) deployYurtManager() error { - // install all crds for yurt-manager - if err := c.ComponentsBuilder.InstallComponents(filepath.Join(c.RootDir, "charts/yurt-manager/crds"), false); err != nil { - return err - } + parts := strings.Split(c.YurtManagerImage, "/") + imageTagParts := strings.Split(parts[len(parts)-1], ":") + tag := imageTagParts[1] - // create auto generated yaml(including clusterrole and webhooks) for yurt-manager - renderedFile, err := generatedAutoGeneratedTempFile(c.RootDir, "kube-system") + cmd := exec.Command(helmPath, "install", "yurt-manager", yurtManagerChartPath, "--namespace", "kube-system", "--set", fmt.Sprintf("image.tag=%s", tag)) + output, err := cmd.CombinedOutput() if err != nil { + klog.Errorf("couldn't install yurt-manager, %v", err) return err } - - // get renderedFile parent dir - renderedFileDir := filepath.Dir(renderedFile) - defer os.RemoveAll(renderedFileDir) - if err := c.ComponentsBuilder.InstallComponents(renderedFile, false); err != nil { - return err - } - - // create yurt-manager - if err := kubeutil.CreateYurtManager(c.ClientSet, c.YurtManagerImage); err != nil { - return err - } + klog.Infof("start to install yurt-manager, %s", string(output)) // waiting yurt-manager pod ready - return wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) { + if err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) { podList, err := c.ClientSet.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{"app.kubernetes.io/name": "yurt-manager"}).String(), }) @@ -325,91 +229,46 @@ func (c 
*ClusterConverter) deployYurtManager() error { klog.Errorf("failed to list yurt-manager pod, %v", err) return false, nil } else if len(podList.Items) == 0 { - klog.Infof("no yurt-manager pod: %#v", podList) + klog.Infof("there is no yurt-manager pod now") + return false, nil + } else if podList.Items[0].Status.Phase != corev1.PodRunning { + klog.Infof("status phase of yurt-manager pod is not running, now is %s", string(podList.Items[0].Status.Phase)) return false, nil } - if podList.Items[0].Status.Phase == corev1.PodRunning { - for i := range podList.Items[0].Status.Conditions { - if podList.Items[0].Status.Conditions[i].Type == corev1.PodReady && - podList.Items[0].Status.Conditions[i].Status != corev1.ConditionTrue { - klog.Infof("pod(%s/%s): %#v", podList.Items[0].Namespace, podList.Items[0].Name, podList.Items[0]) - return false, nil - } - if podList.Items[0].Status.Conditions[i].Type == corev1.ContainersReady && - podList.Items[0].Status.Conditions[i].Status != corev1.ConditionTrue { - klog.Info("yurt manager's container is not ready") - return false, nil - } + for i := range podList.Items[0].Status.Conditions { + if podList.Items[0].Status.Conditions[i].Type == corev1.PodReady && + podList.Items[0].Status.Conditions[i].Status != corev1.ConditionTrue { + klog.Infof("ready condition of pod(%s/%s) is not true", podList.Items[0].Namespace, podList.Items[0].Name) + return false, nil + } + if podList.Items[0].Status.Conditions[i].Type == corev1.ContainersReady && + podList.Items[0].Status.Conditions[i].Status != corev1.ConditionTrue { + klog.Info("container ready condition is not true") + return false, nil } } - return true, nil - }) -} - -// generatedAutoGeneratedTempFile will replace {{ .Release.Namespace }} with ns in webhooks -func generatedAutoGeneratedTempFile(root, ns string) (string, error) { - tempDir, err := os.MkdirTemp(root, "yurt-manager") - if err != nil { - return "", err - } - autoGeneratedYaml := filepath.Join(root, "charts/yurt-manager/templates/yurt-manager-auto-generated.yaml") - contents, err := os.ReadFile(autoGeneratedYaml) - if err != nil { - return "", err - } - - lines := strings.Split(string(contents), "\n") - for i, line := range lines { - if strings.Contains(line, "{{ .Release.Namespace }}") { - lines[i] = strings.ReplaceAll(line, "{{ .Release.Namespace }}", ns) + return true, nil + }); err != nil { + // print logs of yurt-manager + podList, logErr := c.ClientSet.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{"app.kubernetes.io/name": "yurt-manager"}).String(), + }) + if logErr != nil { + klog.Errorf("failed to get yurt-manager pod, %v", logErr) + return err } - } - newContents := strings.Join(lines, "\n") - - tempFile := filepath.Join(tempDir, "yurt-manager-auto-generated.yaml") - klog.Infof("rendered yurt-manager-auto-generated.yaml file: \n%s\n", newContents) - return tempFile, os.WriteFile(tempFile, []byte(newContents), 0644) -} - -func waitForCRDReady(kubeconfigPath, crdName string) error { - // Load the kubeconfig file to get a config object. 
- config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) - if err != nil { - return fmt.Errorf("failed to build config from kubeconfig path: %v", err) - } - - // Create a new clientset for the CRD - apiextensionsClient, err := clientset.NewForConfig(config) - if err != nil { - return fmt.Errorf("failed to create apiextensions client: %v", err) - } - - // Use a poll with a timeout to check for CRD existence and readiness - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - err = wait.PollUntilContextCancel(ctx, 1*time.Second, true, func(ctx context.Context) (bool, error) { - crd, err := apiextensionsClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crdName, metav1.GetOptions{}) - if err != nil { - return false, nil // Retry on error + if len(podList.Items) == 0 { + klog.Errorf("yurt-manager pod doesn't exist") + return err } - - for i := range crd.Status.Conditions { - if crd.Status.Conditions[i].Type == apiextensionsv1.Established { - if crd.Status.Conditions[i].Status == apiextensionsv1.ConditionTrue { - return true, nil // CRD is ready - } - } + if logErr = kubeutil.DumpPod(c.ClientSet, &podList.Items[0], os.Stderr); logErr != nil { + return err } - return false, nil // Retry on not ready - }) - - if err != nil { - return fmt.Errorf("CRD %s is not ready within the timeout period: %v", crdName, err) + return err } - klog.Infof("CRD %s is ready\n", crdName) return nil } diff --git a/test/e2e/cmd/init/converter_test.go b/test/e2e/cmd/init/converter_test.go index da42742efa9..646125163b2 100644 --- a/test/e2e/cmd/init/converter_test.go +++ b/test/e2e/cmd/init/converter_test.go @@ -38,7 +38,6 @@ func NewClusterConverter(ki *Initializer) *ClusterConverter { YurtManagerImage: ki.YurtManagerImage, NodeServantImage: ki.NodeServantImage, YurthubImage: ki.YurtHubImage, - EnableDummyIf: ki.EnableDummyIf, } return converter } @@ -100,84 +99,3 @@ func TestPrepareYurthubStart(t *testing.T) { t.Errorf("failed to prepare yurthub start ") } } - -func TestNodePoolResourceExists(t *testing.T) { - - cases := []struct { - apiResourceObj *metav1.APIResourceList - want error - isExist bool - }{ - { - apiResourceObj: &metav1.APIResourceList{ - GroupVersion: "apps.openyurt.io/v1alpha1", - APIResources: []metav1.APIResource{ - { - Name: "nodepools", - Kind: "NodePool", - }, - }, - }, - want: nil, - isExist: true, - }, - { - apiResourceObj: &metav1.APIResourceList{ - GroupVersion: "apps.openyurt.io/v1alpha1", - APIResources: []metav1.APIResource{ - { - Name: "nodepools", - Kind: "pod", - }, - }, - }, - want: nil, - isExist: false, - }, - } - for _, v := range cases { - fakeKubeClient := clientsetfake.NewSimpleClientset() - fakeKubeClient.Fake.Resources = append(fakeKubeClient.Fake.Resources, v.apiResourceObj) - isExist, err := nodePoolResourceExists(fakeKubeClient) - if err != v.want && isExist != v.isExist { - t.Errorf("falied to verify nodes pool is exist") - } - } -} - -func TestClusterConverter_DeployYurtHub(t *testing.T) { - case1 := struct { - configObj *corev1.ConfigMap - apiResourceObj *metav1.APIResourceList - want error - }{ - configObj: &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Namespace: "kube-public", Name: "cluster-info"}, - }, - apiResourceObj: &metav1.APIResourceList{ - GroupVersion: "apps.openyurt.io/v1alpha1", - APIResources: []metav1.APIResource{ - { - Name: "nodepools", - Kind: "NodePool", - }, - }, - }, - want: nil, - } - - var fakeOut io.Writer - initializer := newKindInitializer(fakeOut, newKindOptions().Config()) - 
fakeclient := clientsetfake.NewSimpleClientset(case1.configObj) - fakeclient.Fake.Resources = append(fakeclient.Fake.Resources, case1.apiResourceObj) - initializer.kubeClient = fakeclient - converter := NewClusterConverter(initializer) - converter.ComponentsBuilder = yurtutil.NewBuilder(initializer.KubeConfig) - converter.CloudNodes = []string{} - converter.EdgeNodes = []string{} - err := converter.deployYurthub() - if err != case1.want { - t.Errorf("failed to deploy yurthub") - } - -} diff --git a/test/e2e/cmd/init/init.go b/test/e2e/cmd/init/init.go index 238220d60b0..2d21ef0b44a 100644 --- a/test/e2e/cmd/init/init.go +++ b/test/e2e/cmd/init/init.go @@ -20,8 +20,11 @@ import ( "context" "fmt" "io" + "net/http" "os" + "os/exec" "path/filepath" + "runtime" "strings" "time" @@ -29,9 +32,11 @@ import ( "github.com/spf13/pflag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" kubeclientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" + kubectllogs "k8s.io/kubectl/pkg/cmd/logs" "github.com/openyurtio/openyurt/pkg/projectinfo" strutil "github.com/openyurtio/openyurt/pkg/util/strings" @@ -40,6 +45,11 @@ import ( kubeutil "github.com/openyurtio/openyurt/test/e2e/cmd/init/util/kubernetes" ) +const ( + flannelYAMLURL = "https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml" + cniPluginsBaseURL = "https://github.com/containernetworking/plugins/releases/download/v1.4.1" +) + var ( validKubernetesVersions = []string{ "v1.17", @@ -130,7 +140,6 @@ type kindOptions struct { UseLocalImages bool KubeConfig string IgnoreError bool - EnableDummyIf bool DisableDefaultCNI bool } @@ -143,7 +152,6 @@ func newKindOptions() *kindOptions { KubernetesVersion: "v1.28", UseLocalImages: false, IgnoreError: false, - EnableDummyIf: true, DisableDefaultCNI: false, } } @@ -210,7 +218,6 @@ func (o *kindOptions) Config() *initializerConfig { YurtManagerImage: fmt.Sprintf(yurtManagerImageFormat, o.OpenYurtVersion), NodeServantImage: fmt.Sprintf(nodeServantImageFormat, o.OpenYurtVersion), yurtIotDockImage: fmt.Sprintf(yurtIotDockImageFormat, o.OpenYurtVersion), - EnableDummyIf: o.EnableDummyIf, DisableDefaultCNI: o.DisableDefaultCNI, } } @@ -235,8 +242,6 @@ func addFlags(flagset *pflag.FlagSet, o *kindOptions) { "Path where the kubeconfig file of new cluster will be stored. The default is ${HOME}/.kube/config.") flagset.BoolVar(&o.IgnoreError, "ignore-error", o.IgnoreError, "Ignore error when using openyurt version that is not officially released.") - flagset.BoolVar(&o.EnableDummyIf, "enable-dummy-if", o.EnableDummyIf, - "Enable dummy interface for yurthub component or not. and recommend to set false on mac env") flagset.BoolVar(&o.DisableDefaultCNI, "disable-default-cni", o.DisableDefaultCNI, "Disable the default cni of kind cluster which is kindnet. 
"+ "If this option is set, you should check the ready status of pods by yourself after installing your CNI.") @@ -256,15 +261,15 @@ type initializerConfig struct { YurtManagerImage string NodeServantImage string yurtIotDockImage string - EnableDummyIf bool DisableDefaultCNI bool } type Initializer struct { initializerConfig - out io.Writer - operator *KindOperator - kubeClient kubeclientset.Interface + out io.Writer + operator *KindOperator + kubeClient kubeclientset.Interface + componentsBuilder *kubeutil.Builder } func newKindInitializer(out io.Writer, cfg *initializerConfig) *Initializer { @@ -297,12 +302,31 @@ func (ki *Initializer) Run() error { } klog.Info("Start to prepare kube client") - kubeconfig, err := clientcmd.BuildConfigFromFlags("", ki.KubeConfig) + cfg, err := clientcmd.BuildConfigFromFlags("", ki.KubeConfig) + if err != nil { + return err + } + ki.componentsBuilder = kubeutil.NewBuilder(ki.KubeConfig) + + ki.kubeClient, err = kubeclientset.NewForConfig(cfg) if err != nil { return err } - ki.kubeClient, err = kubeclientset.NewForConfig(kubeconfig) + + // if default cni is not installed, install flannel instead. + if ki.DisableDefaultCNI { + klog.Info("Start to install flannel in order to make all nodes ready") + err = ki.installFlannel() + if err != nil { + return err + } + } + + klog.Info("Waiting all nodes are ready") + timeout := 2 * time.Minute + err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, timeout, true, allNodesReady(ki.kubeClient)) if err != nil { + klog.Errorf("Not all nodes are in Ready state: %v", err) return err } @@ -323,6 +347,150 @@ func (ki *Initializer) Run() error { return nil } +func (ki *Initializer) installFlannel() error { + cniURL := getCNIBinaryURL() + + err := downloadFile("/tmp/cni.tgz", cniURL) + if err != nil { + klog.Errorf("failed to download %s, %v", cniURL, err) + return err + } + + nodeContainers := []string{"openyurt-e2e-test-control-plane"} + for i := 1; i < ki.NodesNum; i++ { + if i == 1 { + nodeContainers = append(nodeContainers, "openyurt-e2e-test-worker") + } else { + workerName := fmt.Sprintf("openyurt-e2e-test-worker%d", i) + nodeContainers = append(nodeContainers, workerName) + } + } + + for _, container := range nodeContainers { + if err = copyAndExtractCNIPlugins(container, "/tmp/cni.tgz"); err != nil { + klog.Errorf("failed to prepare cni plugin for container %s, %v", container, err) + return err + } + } + + err = downloadFile("/tmp/flannel.yaml", flannelYAMLURL) + if err != nil { + klog.Errorf("failed to download %s, %v", flannelYAMLURL, err) + return err + } + + err = ki.componentsBuilder.InstallComponents("/tmp/flannel.yaml", false) + if err != nil { + klog.Errorf("Error install flannel components, %v", err) + return err + } + + err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 2*time.Minute, true, allFlannelPodReady(ki.kubeClient, "kube-flannel", "kube-flannel-ds")) + if err != nil { + klog.Errorf("Not all flannel pods are in Ready state: %v", err) + return err + } + + return nil +} + +func downloadFile(filepath string, url string) error { + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + + out, err := os.Create(filepath) + if err != nil { + return err + } + defer out.Close() + + _, err = io.Copy(out, resp.Body) + return err +} + +func copyAndExtractCNIPlugins(containerName string, cniPluginsTarPath string) error { + dockerCPCmd := exec.Command("docker", "cp", cniPluginsTarPath, containerName+":/opt/cni/bin/") + if err := 
execCommand(dockerCPCmd); err != nil { + return err + } + + dockerExecCmd := exec.Command("docker", "exec", "-t", containerName, "/bin/bash", "-c", "cd /opt/cni/bin && tar -zxf cni.tgz") + if err := execCommand(dockerExecCmd); err != nil { + return err + } + + return nil +} + +func execCommand(cmd *exec.Cmd) error { + output, err := cmd.CombinedOutput() + if err != nil { + klog.Errorf("execute command error: %v", err) + return err + } + klog.Infof("Command executed: %s Output: %s", cmd.String(), output) + return nil +} + +func getCNIBinaryURL() string { + var arch string + switch runtime.GOARCH { + case "amd64": + arch = "amd64" + case "arm64": + arch = "arm64" + default: + panic("unsupported architecture") + } + return fmt.Sprintf("%s/cni-plugins-linux-%s-v1.4.1.tgz", cniPluginsBaseURL, arch) +} + +func allFlannelPodReady(clientset kubeclientset.Interface, namespace, dsName string) wait.ConditionWithContextFunc { + return func(ctx context.Context) (bool, error) { + ds, err := clientset.AppsV1().DaemonSets(namespace).Get(context.TODO(), dsName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + if ds.Status.DesiredNumberScheduled == ds.Status.NumberReady { + return true, nil + } + + return false, nil + } +} + +func allNodesReady(clientset kubeclientset.Interface) wait.ConditionWithContextFunc { + return func(ctx context.Context) (bool, error) { + nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + for _, node := range nodes.Items { + isNodeReady := false + for _, condition := range node.Status.Conditions { + if condition.Type == "Ready" && condition.Status == "True" { + klog.Infof("Now node %s is ready", node.Name) + isNodeReady = true + break + } + } + if !isNodeReady { + url := clientset.CoreV1().RESTClient().Get().Resource("nodes").Name(node.Name).URL() + nodeRequest := clientset.CoreV1().RESTClient().Get().AbsPath(url.Path) + if err := kubectllogs.DefaultConsumeRequest(nodeRequest, os.Stderr); err != nil { + klog.Errorf("failed to print node(%s) info, %v", node.Name, err) + } + return false, nil + } + } + return true, nil + } +} + func (ki *Initializer) prepareImages() error { if !ki.UseLocalImage { return nil @@ -390,12 +558,11 @@ func (ki *Initializer) prepareKindConfigFile(kindConfigPath string) error { if err = os.WriteFile(kindConfigPath, []byte(kindConfigContent), constants.FileMode); err != nil { return err } - klog.V(1).Infof("generated new kind config file at %s", kindConfigPath) + klog.Infof("generated new kind config file at %s, contents: %s", kindConfigPath, kindConfigContent) return nil } func (ki *Initializer) configureAddons() error { - if err := ki.configureCoreDnsAddon(); err != nil { return err } @@ -420,33 +587,28 @@ func (ki *Initializer) configureAddons() error { } } - // If we disable default cni, nodes will not be ready and the coredns pod always be in pending. - // The health check for coreDNS should be done by someone who will install CNI. 
- if !ki.DisableDefaultCNI { - // wait for coredns pods available - for { - select { - case <-time.After(10 * time.Second): - dnsDp, err := ki.kubeClient.AppsV1().Deployments("kube-system").Get(context.TODO(), "coredns", metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("failed to get coredns deployment when waiting for available, %v", err) - } + // wait for coredns pods available + for { + select { + case <-time.After(10 * time.Second): + dnsDp, err := ki.kubeClient.AppsV1().Deployments("kube-system").Get(context.TODO(), "coredns", metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get coredns deployment when waiting for available, %v", err) + } - if dnsDp.Status.ObservedGeneration < dnsDp.Generation { - klog.Infof("waiting for coredns generation(%d) to be observed. now observed generation is %d", dnsDp.Generation, dnsDp.Status.ObservedGeneration) - continue - } + if dnsDp.Status.ObservedGeneration < dnsDp.Generation { + klog.Infof("waiting for coredns generation(%d) to be observed. now observed generation is %d", dnsDp.Generation, dnsDp.Status.ObservedGeneration) + continue + } - if *dnsDp.Spec.Replicas != dnsDp.Status.AvailableReplicas { - klog.Infof("waiting for coredns replicas(%d) to be ready, now %d pods available", *dnsDp.Spec.Replicas, dnsDp.Status.AvailableReplicas) - continue - } - klog.Info("coredns deployment configuration is completed") - return nil + if *dnsDp.Spec.Replicas != dnsDp.Status.AvailableReplicas { + klog.Infof("waiting for coredns replicas(%d) to be ready, now %d pods available", *dnsDp.Spec.Replicas, dnsDp.Status.AvailableReplicas) + continue } + klog.Info("coredns deployment configuration is completed") + return nil } } - return nil } func (ki *Initializer) configureCoreDnsAddon() error { @@ -513,7 +675,6 @@ func (ki *Initializer) deployOpenYurt() error { } converter := &ClusterConverter{ RootDir: dir, - ComponentsBuilder: kubeutil.NewBuilder(ki.KubeConfig), ClientSet: ki.kubeClient, CloudNodes: ki.CloudNodes, EdgeNodes: ki.EdgeNodes, @@ -523,7 +684,6 @@ func (ki *Initializer) deployOpenYurt() error { YurtManagerImage: ki.YurtManagerImage, NodeServantImage: ki.NodeServantImage, YurthubImage: ki.YurtHubImage, - EnableDummyIf: ki.EnableDummyIf, } if err := converter.Run(); err != nil { klog.Errorf("errors occurred when deploying openyurt components") diff --git a/test/e2e/cmd/init/init_test.go b/test/e2e/cmd/init/init_test.go index dfadbc732e6..d1d5bd6d196 100644 --- a/test/e2e/cmd/init/init_test.go +++ b/test/e2e/cmd/init/init_test.go @@ -60,7 +60,6 @@ func TestAddFlags(t *testing.T) { UseLocalImages: true, KubeConfig: "/home/root/.kube/config", IgnoreError: true, - EnableDummyIf: true, DisableDefaultCNI: true, } @@ -417,9 +416,6 @@ func IsConsistent(initPoint1, initPoint2 *initializerConfig) bool { if initPoint1.NodeServantImage != initPoint2.NodeServantImage { return false } - if initPoint1.EnableDummyIf != initPoint2.EnableDummyIf { - return false - } return true } @@ -439,7 +435,6 @@ func TestKindOptions_Config(t *testing.T) { YurtHubImage: "openyurt/yurthub:latest", YurtManagerImage: "openyurt/yurt-manager:latest", NodeServantImage: "openyurt/node-servant:latest", - EnableDummyIf: true, } if !IsConsistent(&wants, case1.Config()) { t.Errorf("Failed to configure initializer") diff --git a/test/e2e/cmd/init/kindoperator_test.go b/test/e2e/cmd/init/kindoperator_test.go index 9f22ad8ee78..198707b5819 100644 --- a/test/e2e/cmd/init/kindoperator_test.go +++ b/test/e2e/cmd/init/kindoperator_test.go @@ -32,7 +32,7 @@ const ( var operator 
= NewKindOperator("kind", "") -func execCommand(testFunc string, env map[string]string, name string, args ...string) *exec.Cmd { +func execTestCommand(testFunc string, env map[string]string, name string, args ...string) *exec.Cmd { cs := []string{fmt.Sprintf("-test.run=%s", testFunc), "--", name} cs = append(cs, args...) cmd := exec.Command(os.Args[0], cs...) @@ -47,7 +47,7 @@ func execCommand(testFunc string, env map[string]string, name string, args ...st func TestKindVersion(t *testing.T) { fakeExecCommand := func(name string, args ...string) *exec.Cmd { - return execCommand("TestKindVersionStub", map[string]string{ + return execTestCommand("TestKindVersionStub", map[string]string{ "TEST_KIND_VERSION": "1", }, name, args...) } @@ -83,7 +83,7 @@ func TestKindVersionStub(t *testing.T) { func TestGoMinorVersion(t *testing.T) { fakeExecCommand := func(name string, args ...string) *exec.Cmd { - return execCommand("TestGoMinorVersionStub", map[string]string{ + return execTestCommand("TestGoMinorVersionStub", map[string]string{ "TEST_GO_MINOR_VERSION": "1", }, name, args...) } @@ -157,7 +157,7 @@ func TestKindOperator_KindLoadDockerImage(t *testing.T) { } for caseName, c := range cases { fakeExecCommand := func(name string, args ...string) *exec.Cmd { - return execCommand("TestKindLoadDockerImageStub", map[string]string{ + return execTestCommand("TestKindLoadDockerImageStub", map[string]string{ "TEST_KIND_LOAD_DOCKER_IMAGE": "1", "CASE_NAME": caseName, "WANT": c.want, diff --git a/test/e2e/cmd/init/util/kubernetes/apply_addons.go b/test/e2e/cmd/init/util/kubernetes/apply_addons.go index a6462f06493..55cc8b8bf12 100644 --- a/test/e2e/cmd/init/util/kubernetes/apply_addons.go +++ b/test/e2e/cmd/init/util/kubernetes/apply_addons.go @@ -17,113 +17,13 @@ limitations under the License. package kubernetes import ( - "context" "fmt" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" - kubeclientset "k8s.io/client-go/kubernetes" kubectlutil "k8s.io/kubectl/pkg/cmd/util" - - "github.com/openyurtio/openyurt/pkg/projectinfo" - "github.com/openyurtio/openyurt/test/e2e/cmd/init/constants" ) -// DeployYurthubSetting deploy clusterrole, clusterrolebinding for yurthub static pod. -func DeployYurthubSetting(client kubeclientset.Interface) error { - // 1. create the ClusterRole - if err := CreateClusterRoleFromYaml(client, constants.YurthubClusterRole); err != nil { - return err - } - - // 2. create the ClusterRoleBinding - if err := CreateClusterRoleBindingFromYaml(client, constants.YurthubClusterRoleBinding); err != nil { - return err - } - - // 3. create the Configmap - if err := CreateConfigMapFromYaml(client, - SystemNamespace, - constants.YurthubConfigMap); err != nil { - return err - } - - return nil -} - -// DeleteYurthubSetting rm settings for yurthub pod -func DeleteYurthubSetting(client kubeclientset.Interface) error { - - // 1. delete the ClusterRoleBinding - if err := client.RbacV1().ClusterRoleBindings(). - Delete(context.Background(), constants.YurthubComponentName, - metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("fail to delete the clusterrolebinding/%s: %w", - constants.YurthubComponentName, err) - } - - // 2. delete the ClusterRole - if err := client.RbacV1().ClusterRoles(). 
- Delete(context.Background(), constants.YurthubComponentName, - metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("fail to delete the clusterrole/%s: %w", - constants.YurthubComponentName, err) - } - - // 3. remove the ConfigMap - if err := client.CoreV1().ConfigMaps(constants.YurthubNamespace). - Delete(context.Background(), constants.YurthubCmName, - metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("fail to delete the configmap/%s: %w", - constants.YurthubCmName, err) - } - - return nil -} - -func CreateYurtManager(client kubeclientset.Interface, yurtManagerImage string) error { - if err := CreateSecretFromYaml(client, SystemNamespace, constants.YurtManagerCertsSecret); err != nil { - return err - } - - if err := CreateServiceAccountFromYaml(client, - SystemNamespace, constants.YurtManagerServiceAccount); err != nil { - return err - } - - // bind the clusterrole - if err := CreateClusterRoleBindingFromYaml(client, - constants.YurtManagerClusterRoleBinding); err != nil { - return err - } - - // bind the role - if err := CreateRoleBindingFromYaml(client, - constants.YurtManagerRoleBinding); err != nil { - return err - } - - // create the Service - if err := CreateServiceFromYaml(client, - SystemNamespace, - constants.YurtManagerService); err != nil { - return err - } - - // create the yurt-manager deployment - if err := CreateDeployFromYaml(client, - SystemNamespace, - constants.YurtManagerDeployment, - map[string]string{ - "image": yurtManagerImage, - "edgeNodeLabel": projectinfo.GetEdgeWorkerLabelKey()}); err != nil { - return err - } - return nil -} - type Builder struct { kubectlutil.Factory } diff --git a/test/e2e/cmd/init/util/kubernetes/apply_addons_test.go b/test/e2e/cmd/init/util/kubernetes/apply_addons_test.go deleted file mode 100644 index df7fe64ab87..00000000000 --- a/test/e2e/cmd/init/util/kubernetes/apply_addons_test.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2022 The OpenYurt Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubernetes - -import ( - "testing" - - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientsetfake "k8s.io/client-go/kubernetes/fake" -) - -func TestDeployYurthubSetting(t *testing.T) { - fakeKubeClient := clientsetfake.NewSimpleClientset() - err := DeployYurthubSetting(fakeKubeClient) - if err != nil { - t.Logf("falied deploy yurt controller manager") - } -} - -func TestDeleteYurthubSetting(t *testing.T) { - cases := []struct { - clusterRoleObj *rbacv1.ClusterRole - clusterRoleBindingObj *rbacv1.ClusterRoleBinding - configObj *corev1.ConfigMap - want error - }{ - { - clusterRoleObj: &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "kube-system", - Name: "yurt-hub", - }, - }, - clusterRoleBindingObj: &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "kube-system", - Name: "yurt-hub", - }, - }, - - configObj: &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "kube-system", - Name: "yurt-hub-cfg", - }, - }, - want: nil, - }, - } - for _, v := range cases { - fakeKubeClient := clientsetfake.NewSimpleClientset(v.configObj) - err := DeleteYurthubSetting(fakeKubeClient) - if err != v.want { - t.Errorf("failed to delete yurthub setting") - } - } -} diff --git a/test/e2e/cmd/init/util/kubernetes/util.go b/test/e2e/cmd/init/util/kubernetes/util.go index 04eedda72f4..def4c389231 100644 --- a/test/e2e/cmd/init/util/kubernetes/util.go +++ b/test/e2e/cmd/init/util/kubernetes/util.go @@ -18,26 +18,20 @@ package kubernetes import ( "context" - "errors" "fmt" "io" - "os" "sort" "sync" "time" - appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" - k8sruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" kubeclientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" bootstrapapi "k8s.io/cluster-bootstrap/token/api" bootstraputil "k8s.io/cluster-bootstrap/token/util" "k8s.io/klog/v2" @@ -47,7 +41,6 @@ import ( bootstraptokenv1 "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubeadm/app/apis/bootstraptoken/v1" kubeadmconstants "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubeadm/app/constants" nodetoken "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubeadm/app/phases/bootstraptoken/node" - tmplutil "github.com/openyurtio/openyurt/pkg/util/templates" ) const ( @@ -63,230 +56,82 @@ var ( CheckServantJobPeriod = time.Second * 10 ) -func processCreateErr(kind string, name string, err error) error { +func AddEdgeWorkerLabelAndAutonomyAnnotation(cliSet kubeclientset.Interface, node *corev1.Node, lVal, aVal string) (*corev1.Node, error) { + node.Labels[projectinfo.GetEdgeWorkerLabelKey()] = lVal + node.Annotations[projectinfo.GetAutonomyAnnotation()] = aVal + newNode, err := cliSet.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{}) if err != nil { - if apierrors.IsAlreadyExists(err) { - klog.V(4).Infof("[WARNING] %s/%s is already in cluster, skip to prepare it", kind, name) - return nil - } - return fmt.Errorf("fail to create the %s/%s: %w", kind, name, err) + return nil, err } - klog.V(4).Infof("%s/%s is created", kind, name) - return nil + return newNode, nil } -// CreateSecretFromYaml creates the Secret from the 
yaml template. -func CreateSecretFromYaml(cliSet kubeclientset.Interface, ns, saTmpl string) error { - obj, err := YamlToObject([]byte(saTmpl)) +// RunJobAndCleanup runs the job, wait for it to be complete, and delete it +func RunJobAndCleanup(cliSet kubeclientset.Interface, job *batchv1.Job, timeout, period time.Duration) error { + job, err := cliSet.BatchV1().Jobs(job.GetNamespace()).Create(context.Background(), job, metav1.CreateOptions{}) if err != nil { return err } - se, ok := obj.(*corev1.Secret) - if !ok { - return fmt.Errorf("fail to assert secret: %w", err) - } - _, err = cliSet.CoreV1().Secrets(ns).Create(context.Background(), se, metav1.CreateOptions{}) - return processCreateErr("secret", se.Name, err) -} -// CreateServiceAccountFromYaml creates the ServiceAccount from the yaml template. -func CreateServiceAccountFromYaml(cliSet kubeclientset.Interface, ns, saTmpl string) error { - obj, err := YamlToObject([]byte(saTmpl)) + err = wait.PollUntilContextTimeout(context.Background(), period, timeout, true, jobIsCompleted(cliSet, job)) if err != nil { + klog.Errorf("Error job(%s/%s) is not completed, %v", job.Namespace, job.Name, err) return err } - sa, ok := obj.(*corev1.ServiceAccount) - if !ok { - return fmt.Errorf("fail to assert serviceaccount: %w", err) - } - _, err = cliSet.CoreV1().ServiceAccounts(ns).Create(context.Background(), sa, metav1.CreateOptions{}) - return processCreateErr("serviceaccount", sa.Name, err) -} -// CreateClusterRoleFromYaml creates the ClusterRole from the yaml template. -func CreateClusterRoleFromYaml(cliSet kubeclientset.Interface, crTmpl string) error { - obj, err := YamlToObject([]byte(crTmpl)) - if err != nil { - return err - } - cr, ok := obj.(*rbacv1.ClusterRole) - if !ok { - return fmt.Errorf("fail to assert clusterrole: %w", err) - } - _, err = cliSet.RbacV1().ClusterRoles().Create(context.Background(), cr, metav1.CreateOptions{}) - return processCreateErr("clusterrole", cr.Name, err) + return cliSet.BatchV1().Jobs(job.GetNamespace()).Delete(context.Background(), job.GetName(), metav1.DeleteOptions{ + PropagationPolicy: &PropagationPolicy, + }) } -// CreateClusterRoleBindingFromYaml creates the ClusterRoleBinding from the yaml template. -func CreateClusterRoleBindingFromYaml(cliSet kubeclientset.Interface, crbTmpl string) error { - obj, err := YamlToObject([]byte(crbTmpl)) - if err != nil { - return err - } - crb, ok := obj.(*rbacv1.ClusterRoleBinding) - if !ok { - return fmt.Errorf("fail to assert clusterrolebinding: %w", err) - } - _, err = cliSet.RbacV1().ClusterRoleBindings().Create(context.Background(), crb, metav1.CreateOptions{}) - return processCreateErr("clusterrolebinding", crb.Name, err) -} +func jobIsCompleted(clientset kubeclientset.Interface, job *batchv1.Job) wait.ConditionWithContextFunc { + return func(ctx context.Context) (bool, error) { + newJob, err := clientset.BatchV1().Jobs(job.GetNamespace()).Get(context.Background(), job.GetName(), metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return false, err + } -// CreateRoleBindingFromYaml creates the RoleBinding from the yaml template. 
-func CreateRoleBindingFromYaml(cliSet kubeclientset.Interface, crbTmpl string) error { - obj, err := YamlToObject([]byte(crbTmpl)) - if err != nil { - return err - } - rb, ok := obj.(*rbacv1.RoleBinding) - if !ok { - return fmt.Errorf("fail to assert rolebinding: %w", err) - } - _, err = cliSet.RbacV1().RoleBindings("kube-system").Create(context.Background(), rb, metav1.CreateOptions{}) - return processCreateErr("rolebinding", rb.Name, err) -} + // kube-apiserver maybe not work currently, so we should skip other errors + return false, nil + } -// CreateConfigMapFromYaml creates the ConfigMap from the yaml template. -func CreateConfigMapFromYaml(cliSet kubeclientset.Interface, ns, cmTmpl string) error { - obj, err := YamlToObject([]byte(cmTmpl)) - if err != nil { - return err - } - cm, ok := obj.(*corev1.ConfigMap) - if !ok { - return fmt.Errorf("fail to assert configmap: %w", err) - } - _, err = cliSet.CoreV1().ConfigMaps(ns).Create(context.Background(), cm, metav1.CreateOptions{}) - return processCreateErr("configmap", cm.Name, err) -} + if newJob.Status.Succeeded == *newJob.Spec.Completions { + return true, nil + } -// CreateDeployFromYaml creates the Deployment from the yaml template. -func CreateDeployFromYaml(cliSet kubeclientset.Interface, ns, dplyTmpl string, ctx interface{}) error { - ycmdp, err := tmplutil.SubsituteTemplate(dplyTmpl, ctx) - if err != nil { - return err + return false, nil } - dpObj, err := YamlToObject([]byte(ycmdp)) - if err != nil { - return err - } - dply, ok := dpObj.(*appsv1.Deployment) - if !ok { - return errors.New("fail to assert Deployment") - } - _, err = cliSet.AppsV1().Deployments(ns).Create(context.Background(), dply, metav1.CreateOptions{}) - return processCreateErr("deployment", dply.Name, err) } -// CreateServiceFromYaml creates the Service from the yaml template. 
-func CreateServiceFromYaml(cliSet kubeclientset.Interface, ns, svcTmpl string) error { - obj, err := YamlToObject([]byte(svcTmpl)) - if err != nil { +func DumpPod(client kubeclientset.Interface, pod *corev1.Pod, w io.Writer) error { + klog.Infof("dump pod(%s/%s) info:", pod.Namespace, pod.Name) + url := client.CoreV1().RESTClient().Get().Resource("pods").Namespace(pod.Namespace).Name(pod.Name).URL() + podRequest := client.CoreV1().RESTClient().Get().AbsPath(url.Path) + if err := kubectllogs.DefaultConsumeRequest(podRequest, w); err != nil { + klog.Errorf("failed to print pod(%s/%s) info, %v", pod.Namespace, pod.Name, err) return err } - svc, ok := obj.(*corev1.Service) - if !ok { - return fmt.Errorf("fail to assert service: %w", err) - } - _, err = cliSet.CoreV1().Services(ns).Create(context.Background(), svc, metav1.CreateOptions{}) - return processCreateErr("service", svc.Name, err) -} -// YamlToObject deserializes object in yaml format to a runtime.Object -func YamlToObject(yamlContent []byte) (k8sruntime.Object, error) { - decode := serializer.NewCodecFactory(scheme.Scheme).UniversalDeserializer().Decode - obj, _, err := decode(yamlContent, nil, nil) - if err != nil { - return nil, err - } - return obj, nil -} - -// AnnotateNode add a new annotation (=) to the given node -func AnnotateNode(cliSet kubeclientset.Interface, node *corev1.Node, key, val string) (*corev1.Node, error) { - node.Annotations[key] = val - newNode, err := cliSet.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{}) - if err != nil { - return nil, err + klog.Infof("start to print logs for pod(%s/%s):", pod.Namespace, pod.Name) + req := client.CoreV1().Pods(pod.GetNamespace()).GetLogs(pod.Name, &corev1.PodLogOptions{}) + if err := kubectllogs.DefaultConsumeRequest(req, w); err != nil { + klog.Errorf("failed to print logs for pod(%s/%s), %v", pod.Namespace, pod.Name, err) + return err } - return newNode, nil -} -func AddEdgeWorkerLabelAndAutonomyAnnotation(cliSet kubeclientset.Interface, node *corev1.Node, lVal, aVal string) (*corev1.Node, error) { - node.Labels[projectinfo.GetEdgeWorkerLabelKey()] = lVal - node.Annotations[projectinfo.GetAutonomyAnnotation()] = aVal - newNode, err := cliSet.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{}) - if err != nil { - return nil, err - } - return newNode, nil -} - -// RunJobAndCleanup runs the job, wait for it to be complete, and delete it -func RunJobAndCleanup(cliSet kubeclientset.Interface, job *batchv1.Job, timeout, period time.Duration, waitForTimeout bool) error { - job, err := cliSet.BatchV1().Jobs(job.GetNamespace()).Create(context.Background(), job, metav1.CreateOptions{}) + klog.Infof("start to print events for pod(%s/%s):", pod.Namespace, pod.Name) + fieldSelector := "involvedObject.name=" + pod.Name + eventList, err := client.CoreV1().Events(pod.Namespace).List(context.Background(), metav1.ListOptions{ + FieldSelector: fieldSelector, + }) if err != nil { + klog.Errorf("failed to dump events for pod(%s/%s), %v", pod.Namespace, pod.Name, err) return err } - waitJobTimeout := time.After(timeout) - defer func() { - labelSelector, err := metav1.LabelSelectorAsSelector(job.Spec.Selector) - if err != nil { - return - } - podList, err := cliSet.CoreV1().Pods(job.GetNamespace()).List(context.TODO(), metav1.ListOptions{ - LabelSelector: labelSelector.String(), - }) - if err != nil { - return - } - - if len(podList.Items) == 0 { - return - } - if err := PrintPodLog(cliSet, &podList.Items[0], os.Stderr); err != nil { - 
klog.Errorf("failed to print job pod logs, %v", err) - } - }() - - for { - select { - case <-waitJobTimeout: - return errors.New("wait for job to be complete timeout") - case <-time.After(period): - newJob, err := cliSet.BatchV1().Jobs(job.GetNamespace()). - Get(context.Background(), job.GetName(), metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - return err - } - - if waitForTimeout { - klog.Infof("continue to wait for job(%s) to complete until timeout, even if failed to get job, %v", job.GetName(), err) - continue - } - return err - } - - if newJob.Status.Succeeded == *newJob.Spec.Completions { - if err := cliSet.BatchV1().Jobs(job.GetNamespace()). - Delete(context.Background(), job.GetName(), metav1.DeleteOptions{ - PropagationPolicy: &PropagationPolicy, - }); err != nil { - klog.Errorf("fail to delete succeeded servant job(%s): %s", job.GetName(), err) - return err - } - return nil - } - } - } -} -func PrintPodLog(client kubeclientset.Interface, pod *corev1.Pod, w io.Writer) error { - klog.Infof("start to print logs for pod(%s/%s):", pod.Namespace, pod.Name) - req := client.CoreV1().Pods(pod.GetNamespace()).GetLogs(pod.Name, &corev1.PodLogOptions{}) - if err := kubectllogs.DefaultConsumeRequest(req, w); err != nil { - klog.Errorf("failed to print logs for pod(%s/%s), %v", pod.Namespace, pod.Name, err) - return err + for _, event := range eventList.Items { + klog.Infof("Pod(%s/%s) Event: %v, Type: %v, Reason: %v, Message: %v", pod.Namespace, pod.Name, event.Name, event.Type, event.Reason, event.Message) } return nil @@ -298,8 +143,7 @@ func RunServantJobs( cliSet kubeclientset.Interface, waitServantJobTimeout time.Duration, getJob func(nodeName string) (*batchv1.Job, error), - nodeNames []string, ww io.Writer, - waitForTimeout bool) error { + nodeNames []string, ww io.Writer) error { var wg sync.WaitGroup jobByNodeName := make(map[string]*batchv1.Job) @@ -318,7 +162,7 @@ func RunServantJobs( job := jobByNodeName[nodeName] go func() { defer wg.Done() - if err := RunJobAndCleanup(cliSet, job, waitServantJobTimeout, CheckServantJobPeriod, waitForTimeout); err != nil { + if err := RunJobAndCleanup(cliSet, job, waitServantJobTimeout, CheckServantJobPeriod); err != nil { errCh <- fmt.Errorf("[ERROR] fail to run servant job(%s): %w", job.GetName(), err) } else { res <- fmt.Sprintf("\t[INFO] servant job(%s) has succeeded\n", job.GetName()) diff --git a/test/e2e/cmd/init/util/kubernetes/util_test.go b/test/e2e/cmd/init/util/kubernetes/util_test.go index 5a3d32470a8..bafb31f78d8 100644 --- a/test/e2e/cmd/init/util/kubernetes/util_test.go +++ b/test/e2e/cmd/init/util/kubernetes/util_test.go @@ -20,265 +20,14 @@ import ( "testing" "time" - appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientsetfake "k8s.io/client-go/kubernetes/fake" "github.com/openyurtio/openyurt/pkg/projectinfo" - "github.com/openyurtio/openyurt/test/e2e/cmd/init/constants" ) -const testDeployment = ` -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-deployment - labels: - app: nginx -spec: - replicas: 3 - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - spec: - containers: - - name: nginx - image: nginx:latest - ports: - - containerPort: 80 -` - -func TestYamlToObject(t *testing.T) { - obj, err := YamlToObject([]byte(testDeployment)) - if err != nil { - t.Fatalf("YamlToObj failed: %s", err) - } - - nd, ok := obj.(*appsv1.Deployment) - if !ok { - t.Fatalf("Fail to assert 
deployment: %s", err) - } - - if nd.GetName() != "nginx-deployment" { - t.Fatalf("YamlToObj failed: want \"nginx-deployment\" get \"%s\"", nd.GetName()) - } - - val, exist := nd.GetLabels()["app"] - if !exist { - t.Fatal("YamlToObj failed: label \"app\" doesnot exist") - } - if val != "nginx" { - t.Fatalf("YamlToObj failed: want \"nginx\" get %s", val) - } - - if *nd.Spec.Replicas != 3 { - t.Fatalf("YamlToObj failed: want 3 get %d", *nd.Spec.Replicas) - } -} - -func TestCreateServiceAccountFromYaml(t *testing.T) { - cases := []struct { - namespace string - svcaccount string - want error - }{ - { - namespace: "kube-system", - svcaccount: ` -apiVersion: v1 -kind: ServiceAccount -metadata: - name: yurt-tunnel-server - namespace: kube-system -`, - want: nil, - }, - { - namespace: "kube-system", - svcaccount: ` -apiVersion: v1 -kind: ServiceAccount -metadata: - name: yurt-raven-server - namespace: kube-system -`, - want: nil, - }, - } - fakeKubeClient := clientsetfake.NewSimpleClientset() - for _, v := range cases { - err := CreateServiceAccountFromYaml(fakeKubeClient, v.namespace, v.svcaccount) - if err != v.want { - t.Logf("falied to create service account from yaml") - } - } -} - -func TestCreateClusterRoleFromYaml(t *testing.T) { - case1 := struct { - clusterrole string - want error - }{ - clusterrole: constants.YurthubClusterRole, - want: nil, - } - fakeKubeClient := clientsetfake.NewSimpleClientset() - err := CreateClusterRoleFromYaml(fakeKubeClient, case1.clusterrole) - if err != case1.want { - t.Logf("falied to create cluster role from yaml") - } -} - -func TestClusterRoleBindingCreateFromYaml(t *testing.T) { - cases := []struct { - namespace string - clusterrolebinding string - want error - }{ - { - namespace: "kube-system", - clusterrolebinding: constants.YurtManagerClusterRoleBinding, - want: nil, - }, - } - for _, v := range cases { - fakeKubeClient := clientsetfake.NewSimpleClientset() - err := CreateClusterRoleBindingFromYaml(fakeKubeClient, v.clusterrolebinding) - if err != v.want { - t.Logf("falied to create cluster role binding from yaml") - } - } -} - -func TestCreateDeployFromYaml(t *testing.T) { - cases := struct { - namespace string - deployment string - want error - }{ - namespace: "kube-system", - deployment: constants.YurtManagerDeployment, - want: nil, - } - fakeKubeClient := clientsetfake.NewSimpleClientset() - err := CreateDeployFromYaml(fakeKubeClient, cases.namespace, cases.deployment, map[string]string{ - "image": "openyurt/yurt-manager:latest", - "edgeWorkerLabel": projectinfo.GetEdgeWorkerLabelKey()}) - if err != cases.want { - t.Logf("falied to create deployment from yaml") - } -} - -func TestCreateServiceFromYaml(t *testing.T) { - cases := []struct { - namespace string - service string - want error - }{ - { - namespace: "kube-system", - service: constants.YurtManagerService, - want: nil, - }, - } - for _, v := range cases { - fakeKubeClient := clientsetfake.NewSimpleClientset() - err := CreateServiceFromYaml(fakeKubeClient, v.namespace, v.service) - if err != v.want { - t.Logf("falied to create service from yaml") - } - } - -} - -func TestConfigMapFromYaml(t *testing.T) { - cases := []struct { - namespace string - configMap string - want error - }{ - { - namespace: "kube-system", - configMap: constants.YurthubConfigMap, - want: nil, - }, - { - namespace: "kube-system", - configMap: constants.YurthubConfigMap, - want: nil, - }, - } - for _, v := range cases { - fakeKubeClient := clientsetfake.NewSimpleClientset() - err := CreateConfigMapFromYaml(fakeKubeClient, 
v.namespace, v.configMap) - if err != v.want { - t.Logf("falied to create service from yaml") - } - } -} - -func TestAnnotateNode(t *testing.T) { - cases := []struct { - node *corev1.Node - want *corev1.Node - key string - val string - }{ - { - node: &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cloud-node", - Annotations: map[string]string{ - "foo": "yeah~", - }, - }, - }, - want: &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cloud-node", - Annotations: map[string]string{ - "foo": "foo~", - }, - }, - }, - key: "foo", - val: "foo~", - }, - { - node: &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "edge-node", - Annotations: map[string]string{}, - }, - }, - want: &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cloud-node", - Annotations: map[string]string{ - "foo": "foo~", - }, - }, - }, - key: "foo", - val: "yeah~", - }, - } - - for _, v := range cases { - fakeKubeClient := clientsetfake.NewSimpleClientset(v.node) - res, err := AnnotateNode(fakeKubeClient, v.node, v.key, v.val) - if err != nil || res.Annotations[v.key] != v.val { - t.Logf("falied to annotate nodes") - } - } -} - func TestAddEdgeWorkerLabelAndAutonomyAnnotation(t *testing.T) { cases := []struct { node *corev1.Node @@ -390,54 +139,9 @@ func TestRunJobAndCleanup(t *testing.T) { for _, v := range cases { fakeKubeClient := clientsetfake.NewSimpleClientset() - err := RunJobAndCleanup(fakeKubeClient, v.jobObj, time.Second*10, time.Second, false) + err := RunJobAndCleanup(fakeKubeClient, v.jobObj, time.Second*10, time.Second) if err != v.want { t.Logf("falied to run job and cleanup") } } } - -/* -func TestRunServantJobs(t *testing.T) { - - var ww io.Writer - convertCtx := map[string]string{ - "node_servant_image": "foo_servant_image", - "yurthub_image": "foo_yurthub_image", - "joinToken": "foo", - // The node-servant will detect the kubeadm_conf_path automatically - // It will be either "/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf" - // or "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf". - "kubeadm_conf_path": "", - "working_mode": "edge", - "enable_dummy_if": "true", - } - - cases := []struct { - nodeName []string - want error - }{ - { - nodeName: []string{ - "cloud-node", - "edge-node", - }, - want: nil, - }, - { - nodeName: []string{"foo", "test"}, - want: nil, - }, - } - - for _, v := range cases { - fakeKubeClient := clientsetfake.NewSimpleClientset() - err := RunServantJobs(fakeKubeClient, time.Second, func(nodeName string) (*batchv1.Job, error) { - return nodeservant.RenderNodeServantJob("convert", convertCtx, nodeName) - }, v.nodeName, ww, false) - if err != nil { - t.Logf("falied to run servant jobs") - } - } -} -*/
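
Note: the rewritten RunJobAndCleanup in this patch drops the hand-rolled select/time.After loop in favor of wait.PollUntilContextTimeout from k8s.io/apimachinery, driving the jobIsCompleted condition. For reference, a minimal, self-contained sketch of that polling pattern is shown below; it is not part of the patch, and demoCondition is only an illustrative stand-in for a real condition such as jobIsCompleted.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0

	// demoCondition mimics jobIsCompleted: it is polled until it reports done,
	// returns an error, or the surrounding timeout expires.
	demoCondition := func(ctx context.Context) (bool, error) {
		attempts++
		return attempts >= 3, nil // pretend the job succeeds on the third check
	}

	// immediate=true: the condition is evaluated once right away, then once per
	// interval (1s) until it returns true, returns an error, or the 10s timeout elapses.
	err := wait.PollUntilContextTimeout(context.Background(), time.Second, 10*time.Second, true, demoCondition)
	fmt.Printf("polling finished after %d attempts, err=%v\n", attempts, err)
}

Compared with the removed loop, the helper centralizes timeout and interval handling and lets the condition function decide (as jobIsCompleted does) whether a transient error should abort polling or simply be retried.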