diff --git a/operator/PROJECT b/operator/PROJECT index 7618950c9..f70e2edc0 100644 --- a/operator/PROJECT +++ b/operator/PROJECT @@ -30,4 +30,13 @@ resources: kind: ManagedClusterMigration path: github.com/stolostron/multicluster-global-hub-operator/migration/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: open-cluster-management.io + group: operator + kind: MulticlusterGlobalHubAgent + path: github.com/stolostron/multicluster-global-hub-operator/api/operator/v1alpha1 + version: v1alpha1 version: "3" diff --git a/operator/api/operator/shared/shared.go b/operator/api/operator/shared/shared.go new file mode 100644 index 000000000..e2e80373e --- /dev/null +++ b/operator/api/operator/shared/shared.go @@ -0,0 +1,36 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package shared contains API Schema definitions for the operator API group +// +kubebuilder:object:generate=true +// +groupName=operator.open-cluster-management.io + +package shared + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ResourceRequirements copied from corev1.ResourceRequirements +// We do not need to support ResourceClaim +type ResourceRequirements struct { + // Requests describes the minimum amount of compute resources required. + // If requests are omitted for a container, it defaults to the specified limits. + // If there are no specified limits, it defaults to an implementation-defined value. 
+ // For more information, see: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Requests corev1.ResourceList `json:"requests,omitempty"` +} diff --git a/operator/api/operator/shared/zz_generated.deepcopy.go b/operator/api/operator/shared/zz_generated.deepcopy.go new file mode 100644 index 000000000..25fc8e3b7 --- /dev/null +++ b/operator/api/operator/shared/zz_generated.deepcopy.go @@ -0,0 +1,47 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package shared + +import ( + "k8s.io/api/core/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. 
+func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { + if in == nil { + return nil + } + out := new(ResourceRequirements) + in.DeepCopyInto(out) + return out +} diff --git a/operator/api/operator/v1alpha1/groupversion_info.go b/operator/api/operator/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..d247186f9 --- /dev/null +++ b/operator/api/operator/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the operator v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=operator.open-cluster-management.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "operator.open-cluster-management.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/operator/api/operator/v1alpha1/multiclusterglobalhubagent_types.go b/operator/api/operator/v1alpha1/multiclusterglobalhubagent_types.go new file mode 100644 index 000000000..93cb5db76 --- /dev/null +++ b/operator/api/operator/v1alpha1/multiclusterglobalhubagent_types.go @@ -0,0 +1,85 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + shared "github.com/stolostron/multicluster-global-hub/operator/api/operator/shared" +) + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName={mgha,mcgha} +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="The overall status of the MulticlusterGlobalHubAgent" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// MulticlusterGlobalHubAgent is the Schema for the multiclusterglobalhubagents API +type MulticlusterGlobalHubAgent struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MulticlusterGlobalHubAgentSpec `json:"spec,omitempty"` + Status MulticlusterGlobalHubAgentStatus `json:"status,omitempty"` +} + +// MulticlusterGlobalHubAgentSpec defines the desired state of MulticlusterGlobalHubAgent +type MulticlusterGlobalHubAgentSpec struct { + // ImagePullPolicy specifies the pull policy 
of the multicluster global hub agent image + // +operator-sdk:csv:customresourcedefinitions:type=spec + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:imagePullPolicy"} + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + // ImagePullSecret specifies the pull secret of the multicluster global hub agent image + // +operator-sdk:csv:customresourcedefinitions:type=spec + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:io.kubernetes:Secret"} + // +optional + ImagePullSecret string `json:"imagePullSecret,omitempty"` + // NodeSelector specifies the desired state of NodeSelector + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // Tolerations causes all components to tolerate any taints + // +operator-sdk:csv:customresourcedefinitions:type=spec + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + // Compute Resources required by the global hub agent + // +optional + Resources *shared.ResourceRequirements `json:"resources,omitempty"` + // TransportConfigSecretName specifies the secret which is used to connect to the global hub Kafka. + // You can get kafka.yaml content using `tools/generate-kafka-config.sh` from the global hub environment. 
+ // Then you can create the secret in the current environment by running `kubectl create secret generic transport-config -n "multicluster-global-hub" --from-file=kafka.yaml="./kafka.yaml"` + // +kubebuilder:default=transport-config + TransportConfigSecretName string `json:"transportConfigSecretName,omitempty"` +} + +// MulticlusterGlobalHubAgentStatus defines the observed state of MulticlusterGlobalHubAgent +type MulticlusterGlobalHubAgentStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// MulticlusterGlobalHubAgentList contains a list of MulticlusterGlobalHubAgent +type MulticlusterGlobalHubAgentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MulticlusterGlobalHubAgent `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MulticlusterGlobalHubAgent{}, &MulticlusterGlobalHubAgentList{}) +} diff --git a/operator/api/operator/v1alpha1/zz_generated.deepcopy.go b/operator/api/operator/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..6ea0a6238 --- /dev/null +++ b/operator/api/operator/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,136 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + + "github.com/stolostron/multicluster-global-hub/operator/api/operator/shared" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MulticlusterGlobalHubAgent) DeepCopyInto(out *MulticlusterGlobalHubAgent) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MulticlusterGlobalHubAgent. +func (in *MulticlusterGlobalHubAgent) DeepCopy() *MulticlusterGlobalHubAgent { + if in == nil { + return nil + } + out := new(MulticlusterGlobalHubAgent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MulticlusterGlobalHubAgent) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MulticlusterGlobalHubAgentList) DeepCopyInto(out *MulticlusterGlobalHubAgentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MulticlusterGlobalHubAgent, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MulticlusterGlobalHubAgentList. 
+func (in *MulticlusterGlobalHubAgentList) DeepCopy() *MulticlusterGlobalHubAgentList { + if in == nil { + return nil + } + out := new(MulticlusterGlobalHubAgentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MulticlusterGlobalHubAgentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MulticlusterGlobalHubAgentSpec) DeepCopyInto(out *MulticlusterGlobalHubAgentSpec) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(shared.ResourceRequirements) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MulticlusterGlobalHubAgentSpec. +func (in *MulticlusterGlobalHubAgentSpec) DeepCopy() *MulticlusterGlobalHubAgentSpec { + if in == nil { + return nil + } + out := new(MulticlusterGlobalHubAgentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MulticlusterGlobalHubAgentStatus) DeepCopyInto(out *MulticlusterGlobalHubAgentStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MulticlusterGlobalHubAgentStatus. 
+func (in *MulticlusterGlobalHubAgentStatus) DeepCopy() *MulticlusterGlobalHubAgentStatus { + if in == nil { + return nil + } + out := new(MulticlusterGlobalHubAgentStatus) + in.DeepCopyInto(out) + return out +} diff --git a/operator/api/operator/v1alpha4/multiclusterglobalhub_types.go b/operator/api/operator/v1alpha4/multiclusterglobalhub_types.go index 8dafc9eeb..a8c8af376 100644 --- a/operator/api/operator/v1alpha4/multiclusterglobalhub_types.go +++ b/operator/api/operator/v1alpha4/multiclusterglobalhub_types.go @@ -19,6 +19,8 @@ package v1alpha4 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + shared "github.com/stolostron/multicluster-global-hub/operator/api/operator/shared" ) // DataLayerType specifies the type of data layer that global hub stores and transports the data. @@ -141,18 +143,7 @@ type AdvancedSpec struct { type CommonSpec struct { // Compute Resources required by this component // +optional - Resources *ResourceRequirements `json:"resources,omitempty"` -} - -// ResourceRequirements copied from corev1.ResourceRequirements -// We do not need to support ResourceClaim -type ResourceRequirements struct { - // Requests describes the minimum amount of compute resources required. - // If requests are omitted for a container, it defaults to the specified limits. - // If there are no specified limits, it defaults to an implementation-defined value. - // For more information, see: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - // +optional - Requests corev1.ResourceList `json:"requests,omitempty"` + Resources *shared.ResourceRequirements `json:"resources,omitempty"` } // DataLayerSpec is a discriminated union of data layer specific configuration. 
diff --git a/operator/api/operator/v1alpha4/zz_generated.deepcopy.go b/operator/api/operator/v1alpha4/zz_generated.deepcopy.go index eb9e0c59b..f7c6acd8a 100644 --- a/operator/api/operator/v1alpha4/zz_generated.deepcopy.go +++ b/operator/api/operator/v1alpha4/zz_generated.deepcopy.go @@ -24,6 +24,8 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + + "github.com/stolostron/multicluster-global-hub/operator/api/operator/shared" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -71,7 +73,7 @@ func (in *CommonSpec) DeepCopyInto(out *CommonSpec) { *out = *in if in.Resources != nil { in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) + *out = new(shared.ResourceRequirements) (*in).DeepCopyInto(*out) } } @@ -272,28 +274,6 @@ func (in *PostgresSpec) DeepCopy() *PostgresSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { - *out = *in - if in.Requests != nil { - in, out := &in.Requests, &out.Requests - *out = make(v1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. -func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { - if in == nil { - return nil - } - out := new(ResourceRequirements) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StatusCondition) DeepCopyInto(out *StatusCondition) { *out = *in diff --git a/operator/bundle/manifests/multicluster-global-hub-operator.clusterserviceversion.yaml b/operator/bundle/manifests/multicluster-global-hub-operator.clusterserviceversion.yaml index 851051c21..b2c5dbf6f 100644 --- a/operator/bundle/manifests/multicluster-global-hub-operator.clusterserviceversion.yaml +++ b/operator/bundle/manifests/multicluster-global-hub-operator.clusterserviceversion.yaml @@ -12,6 +12,16 @@ metadata: }, "spec": {} }, + { + "apiVersion": "operator.open-cluster-management.io/v1alpha1", + "kind": "MulticlusterGlobalHubAgent", + "metadata": { + "name": "multiclusterglobalhubagent" + }, + "spec": { + "transportConfigSecretName": "transport-config" + } + }, { "apiVersion": "operator.open-cluster-management.io/v1alpha4", "kind": "MulticlusterGlobalHub", @@ -31,7 +41,7 @@ metadata: categories: Integration & Delivery,OpenShift Optional certified: "false" containerImage: quay.io/stolostron/multicluster-global-hub-operator:latest - createdAt: "2024-12-25T06:09:28Z" + createdAt: "2025-01-14T11:53:57Z" description: Manages the installation and upgrade of the Multicluster Global Hub. 
features.operators.openshift.io/cnf: "false" features.operators.openshift.io/cni: "false" @@ -90,6 +100,28 @@ spec: displayName: Conditions path: conditions version: v1alpha1 + - description: MulticlusterGlobalHubAgent is the Schema for the multiclusterglobalhubagents + API + displayName: Multicluster Global Hub Agent + kind: MulticlusterGlobalHubAgent + name: multiclusterglobalhubagents.operator.open-cluster-management.io + specDescriptors: + - description: ImagePullPolicy specifies the pull policy of the multicluster + global hub agent image + displayName: Image Pull Policy + path: imagePullPolicy + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:imagePullPolicy + - description: ImagePullSecret specifies the pull secret of the multicluster + global hub agent image + displayName: Image Pull Secret + path: imagePullSecret + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: Tolerations causes all components to tolerate any taints + displayName: Tolerations + path: tolerations + version: v1alpha1 - description: MulticlusterGlobalHub defines the configuration for an instance of the multiCluster global hub displayName: Multicluster Global Hub @@ -317,13 +349,16 @@ spec: - "" resources: - configmaps - - serviceaccounts - - services + - namespaces + - persistentvolumeclaims + - pods + - secrets verbs: - create - delete - get - list + - patch - update - watch - apiGroups: @@ -342,23 +377,13 @@ spec: - apiGroups: - "" resources: - - namespaces - verbs: - - get - - list - - update - - watch - - apiGroups: - - "" - resources: - - persistentvolumeclaims - - secrets + - serviceaccounts + - services verbs: - create - delete - get - list - - patch - update - watch - apiGroups: @@ -454,13 +479,23 @@ spec: - apps.open-cluster-management.io resources: - channels - - placementrules - subscriptions verbs: - get - list - patch - update + - apiGroups: + - apps.open-cluster-management.io + resources: + - placementrules + - subscriptionreports + verbs: + - 
get + - list + - patch + - update + - watch - apiGroups: - authentication.open-cluster-management.io resources: @@ -518,17 +553,8 @@ spec: - apiGroups: - cluster.open-cluster-management.io resources: + - clusterclaims - managedclusters - verbs: - - create - - delete - - get - - list - - update - - watch - - apiGroups: - - cluster.open-cluster-management.io - resources: - managedclustersetbindings - placements verbs: @@ -538,15 +564,21 @@ spec: - list - patch - update + - watch - apiGroups: - cluster.open-cluster-management.io resources: + - managedclusters/finalizers - managedclustersets + - placementdecisions + - placementdecisions/finalizers + - placements/finalizers verbs: - get - list - patch - update + - watch - apiGroups: - config.open-cluster-management.io resources: @@ -559,6 +591,15 @@ spec: - patch - update - watch + - apiGroups: + - config.openshift.io + resources: + - clusterversions + - infrastructures + verbs: + - get + - list + - watch - apiGroups: - coordination.k8s.io resources: @@ -587,6 +628,15 @@ spec: - get - list - watch + - apiGroups: + - internal.open-cluster-management.io + resources: + - managedclusterinfos + verbs: + - get + - list + - update + - watch - apiGroups: - kafka.strimzi.io resources: @@ -638,6 +688,7 @@ spec: - apiGroups: - operator.open-cluster-management.io resources: + - multiclusterglobalhubagents - multiclusterglobalhubs verbs: - create @@ -650,12 +701,14 @@ spec: - apiGroups: - operator.open-cluster-management.io resources: + - multiclusterglobalhubagents/finalizers - multiclusterglobalhubs/finalizers verbs: - update - apiGroups: - operator.open-cluster-management.io resources: + - multiclusterglobalhubagents/status - multiclusterglobalhubs/status verbs: - get @@ -694,11 +747,14 @@ spec: resources: - placementbindings - policies + - policyautomations + - policysets verbs: - get - list - patch - update + - watch - apiGroups: - postgres-operator.crunchydata.com resources: diff --git 
a/operator/bundle/manifests/operator.open-cluster-management.io_multiclusterglobalhubagents.yaml b/operator/bundle/manifests/operator.open-cluster-management.io_multiclusterglobalhubagents.yaml new file mode 100644 index 000000000..83ac05da6 --- /dev/null +++ b/operator/bundle/manifests/operator.open-cluster-management.io_multiclusterglobalhubagents.yaml @@ -0,0 +1,146 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + creationTimestamp: null + name: multiclusterglobalhubagents.operator.open-cluster-management.io +spec: + group: operator.open-cluster-management.io + names: + kind: MulticlusterGlobalHubAgent + listKind: MulticlusterGlobalHubAgentList + plural: multiclusterglobalhubagents + shortNames: + - mgha + - mcgha + singular: multiclusterglobalhubagent + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The overall status of the MulticlusterGlobalHubAgent + jsonPath: .status.phase + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: MulticlusterGlobalHubAgent is the Schema for the multiclusterglobalhubagents + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MulticlusterGlobalHubAgentSpec defines the desired state + of MulticlusterGlobalHubAgent + properties: + imagePullPolicy: + description: ImagePullPolicy specifies the pull policy of the multicluster + global hub agent image + type: string + imagePullSecret: + description: ImagePullSecret specifies the pull secret of the multicluster + global hub agent image + type: string + nodeSelector: + additionalProperties: + type: string + description: NodeSelector specifies the desired state of NodeSelector + type: object + resources: + description: Compute Resources required by the global hub agent + properties: + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If requests are omitted for a container, it defaults to the specified limits. + If there are no specified limits, it defaults to an implementation-defined value. + For more information, see: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tolerations: + description: Tolerations causes all components to tolerate any taints + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. 
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + transportConfigSecretName: + default: transport-config + description: |- + TransportConfigSecretName specifies the secret which is used to connect to the global hub Kafka. + You can get kafka.yaml content using `tools/generate-kafka-config.sh` from the global hub environment. 
+ Then you can create the secret in the current environment by running `kubectl create secret generic transport-config -n "multicluster-global-hub" --from-file=kafka.yaml="./kafka.yaml"` + type: string + type: object + status: + description: MulticlusterGlobalHubAgentStatus defines the observed state + of MulticlusterGlobalHubAgent + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/operator/config/crd/bases/operator.open-cluster-management.io_multiclusterglobalhubagents.yaml b/operator/config/crd/bases/operator.open-cluster-management.io_multiclusterglobalhubagents.yaml new file mode 100644 index 000000000..5ed626f16 --- /dev/null +++ b/operator/config/crd/bases/operator.open-cluster-management.io_multiclusterglobalhubagents.yaml @@ -0,0 +1,140 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: multiclusterglobalhubagents.operator.open-cluster-management.io +spec: + group: operator.open-cluster-management.io + names: + kind: MulticlusterGlobalHubAgent + listKind: MulticlusterGlobalHubAgentList + plural: multiclusterglobalhubagents + shortNames: + - mgha + - mcgha + singular: multiclusterglobalhubagent + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The overall status of the MulticlusterGlobalHubAgent + jsonPath: .status.phase + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: MulticlusterGlobalHubAgent is the Schema for the multiclusterglobalhubagents + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MulticlusterGlobalHubAgentSpec defines the desired state + of MulticlusterGlobalHubAgent + properties: + imagePullPolicy: + description: ImagePullPolicy specifies the pull policy of the multicluster + global hub agent image + type: string + imagePullSecret: + description: ImagePullSecret specifies the pull secret of the multicluster + global hub agent image + type: string + nodeSelector: + additionalProperties: + type: string + description: NodeSelector specifies the desired state of NodeSelector + type: object + resources: + description: Compute Resources required by the global hub agent + properties: + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If requests are omitted for a container, it defaults to the specified limits. + If there are no specified limits, it defaults to an implementation-defined value. 
+ For more information, see: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tolerations: + description: Tolerations causes all components to tolerate any taints + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + transportConfigSecretName: + default: transport-config + description: |- + TransportConfigSecretName specifies the secret which is used to connect to the global hub Kafka. + You can get kafka.yaml content using `tools/generate-kafka-config.sh` from the global hub environment. 
+ Then you can create the secret in the current environment by running `kubectl create secret generic transport-config -n "multicluster-global-hub" --from-file=kafka.yaml="./kafka.yaml"` + type: string + type: object + status: + description: MulticlusterGlobalHubAgentStatus defines the observed state + of MulticlusterGlobalHubAgent + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/operator/config/crd/kustomization.yaml b/operator/config/crd/kustomization.yaml index 995861f0b..bf5d13497 100644 --- a/operator/config/crd/kustomization.yaml +++ b/operator/config/crd/kustomization.yaml @@ -4,17 +4,20 @@ resources: - bases/operator.open-cluster-management.io_multiclusterglobalhubs.yaml - bases/global-hub.open-cluster-management.io_managedclustermigrations.yaml +- bases/operator.open-cluster-management.io_multiclusterglobalhubagents.yaml #+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD #- patches/webhook_in_configs.yaml +#- path: patches/webhook_in_operator_multiclusterglobalhubagents.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD #- patches/cainjection_in_configs.yaml +#- path: patches/cainjection_in_operator_multiclusterglobalhubagents.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
diff --git a/operator/config/manifests/bases/multicluster-global-hub-operator.clusterserviceversion.yaml b/operator/config/manifests/bases/multicluster-global-hub-operator.clusterserviceversion.yaml index 70474b440..dadf5e687 100644 --- a/operator/config/manifests/bases/multicluster-global-hub-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/bases/multicluster-global-hub-operator.clusterserviceversion.yaml @@ -65,6 +65,28 @@ spec: displayName: Conditions path: conditions version: v1alpha1 + - description: MulticlusterGlobalHubAgent is the Schema for the multiclusterglobalhubagents + API + displayName: Multicluster Global Hub Agent + kind: MulticlusterGlobalHubAgent + name: multiclusterglobalhubagents.operator.open-cluster-management.io + specDescriptors: + - description: ImagePullPolicy specifies the pull policy of the multicluster + global hub agent image + displayName: Image Pull Policy + path: imagePullPolicy + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:imagePullPolicy + - description: ImagePullSecret specifies the pull secret of the multicluster + global hub agent image + displayName: Image Pull Secret + path: imagePullSecret + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: Tolerations causes all components to tolerate any taints + displayName: Tolerations + path: tolerations + version: v1alpha1 - description: MulticlusterGlobalHub defines the configuration for an instance of the multiCluster global hub displayName: Multicluster Global Hub diff --git a/operator/config/rbac/role.yaml b/operator/config/rbac/role.yaml index 0da0bf2ab..1ebbaecbf 100644 --- a/operator/config/rbac/role.yaml +++ b/operator/config/rbac/role.yaml @@ -8,13 +8,16 @@ rules: - "" resources: - configmaps - - serviceaccounts - - services + - namespaces + - persistentvolumeclaims + - pods + - secrets verbs: - create - delete - get - list + - patch - update - watch - apiGroups: @@ -33,23 +36,13 @@ rules: - apiGroups: - "" resources: - - 
namespaces - verbs: - - get - - list - - update - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - - secrets + - serviceaccounts + - services verbs: - create - delete - get - list - - patch - update - watch - apiGroups: @@ -145,13 +138,23 @@ rules: - apps.open-cluster-management.io resources: - channels - - placementrules - subscriptions verbs: - get - list - patch - update +- apiGroups: + - apps.open-cluster-management.io + resources: + - placementrules + - subscriptionreports + verbs: + - get + - list + - patch + - update + - watch - apiGroups: - authentication.open-cluster-management.io resources: @@ -209,17 +212,8 @@ rules: - apiGroups: - cluster.open-cluster-management.io resources: + - clusterclaims - managedclusters - verbs: - - create - - delete - - get - - list - - update - - watch -- apiGroups: - - cluster.open-cluster-management.io - resources: - managedclustersetbindings - placements verbs: @@ -229,15 +223,21 @@ rules: - list - patch - update + - watch - apiGroups: - cluster.open-cluster-management.io resources: + - managedclusters/finalizers - managedclustersets + - placementdecisions + - placementdecisions/finalizers + - placements/finalizers verbs: - get - list - patch - update + - watch - apiGroups: - config.open-cluster-management.io resources: @@ -250,6 +250,15 @@ rules: - patch - update - watch +- apiGroups: + - config.openshift.io + resources: + - clusterversions + - infrastructures + verbs: + - get + - list + - watch - apiGroups: - coordination.k8s.io resources: @@ -278,6 +287,15 @@ rules: - get - list - watch +- apiGroups: + - internal.open-cluster-management.io + resources: + - managedclusterinfos + verbs: + - get + - list + - update + - watch - apiGroups: - kafka.strimzi.io resources: @@ -329,6 +347,7 @@ rules: - apiGroups: - operator.open-cluster-management.io resources: + - multiclusterglobalhubagents - multiclusterglobalhubs verbs: - create @@ -341,12 +360,14 @@ rules: - apiGroups: - 
operator.open-cluster-management.io resources: + - multiclusterglobalhubagents/finalizers - multiclusterglobalhubs/finalizers verbs: - update - apiGroups: - operator.open-cluster-management.io resources: + - multiclusterglobalhubagents/status - multiclusterglobalhubs/status verbs: - get @@ -385,11 +406,14 @@ rules: resources: - placementbindings - policies + - policyautomations + - policysets verbs: - get - list - patch - update + - watch - apiGroups: - postgres-operator.crunchydata.com resources: diff --git a/operator/config/samples/kustomization.yaml b/operator/config/samples/kustomization.yaml index 365f910e0..112c0e54f 100644 --- a/operator/config/samples/kustomization.yaml +++ b/operator/config/samples/kustomization.yaml @@ -2,4 +2,5 @@ resources: - operator_v1alpha4_multiclusterglobalhub.yaml - global_hub_v1alpha1_managedclustermigration.yaml +- operator_v1alpha1_multiclusterglobalhubagent.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/operator/config/samples/operator_v1alpha1_multiclusterglobalhubagent.yaml b/operator/config/samples/operator_v1alpha1_multiclusterglobalhubagent.yaml new file mode 100644 index 000000000..4af1ff86d --- /dev/null +++ b/operator/config/samples/operator_v1alpha1_multiclusterglobalhubagent.yaml @@ -0,0 +1,6 @@ +apiVersion: operator.open-cluster-management.io/v1alpha1 +kind: MulticlusterGlobalHubAgent +metadata: + name: multiclusterglobalhubagent +spec: + transportConfigSecretName: transport-config diff --git a/operator/pkg/config/multiclusterglobalhub_config.go b/operator/pkg/config/multiclusterglobalhub_config.go index c75fa2cfe..953aba4ed 100644 --- a/operator/pkg/config/multiclusterglobalhub_config.go +++ b/operator/pkg/config/multiclusterglobalhub_config.go @@ -36,6 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" + "github.com/stolostron/multicluster-global-hub/operator/api/operator/v1alpha1" 
"github.com/stolostron/multicluster-global-hub/operator/api/operator/v1alpha4" operatorconstants "github.com/stolostron/multicluster-global-hub/operator/pkg/constants" "github.com/stolostron/multicluster-global-hub/pkg/constants" @@ -173,6 +174,19 @@ func IsPaused(mgh *v1alpha4.MulticlusterGlobalHub) bool { return false } +// IsAgentPaused returns true if the MulticlusterGlobalHubAgent instance is annotated as paused, and false otherwise +func IsAgentPaused(mgha *v1alpha1.MulticlusterGlobalHubAgent) bool { + annotations := mgha.GetAnnotations() + if annotations == nil { + return false + } + if annotations[operatorconstants.AnnotationMGHPause] != "" && + strings.EqualFold(annotations[operatorconstants.AnnotationMGHPause], "true") { + return true + } + return false +} + // WithInventory returns true means common inventory is deployed func WithInventory(mgh *v1alpha4.MulticlusterGlobalHub) bool { _, ok := mgh.GetAnnotations()[operatorconstants.AnnotationMGHWithInventory] @@ -399,3 +413,16 @@ func GetMulticlusterGlobalHub(ctx context.Context, c client.Client) (*v1alpha4.M } return &mghList.Items[0], nil } + +func GetMulticlusterGlobalHubAgent(ctx context.Context, c client.Client) (*v1alpha1.MulticlusterGlobalHubAgent, error) { + mghaList := &v1alpha1.MulticlusterGlobalHubAgentList{} + err := c.List(ctx, mghaList) + if err != nil { + return nil, err + } + if len(mghaList.Items) != 1 { + log.Infof("mgha should have 1 instance, but got %v", len(mghaList.Items)) + return nil, nil + } + return &mghaList.Items[0], nil +} diff --git a/operator/pkg/config/scheme_config.go b/operator/pkg/config/scheme_config.go index c930830ac..0c2b3d365 100644 --- a/operator/pkg/config/scheme_config.go +++ b/operator/pkg/config/scheme_config.go @@ -3,6 +3,7 @@ package config import ( kafkav1beta2 "github.com/RedHatInsights/strimzi-client-go/apis/kafka.strimzi.io/v1beta2" postgresv1beta1 "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" + configv1 
"github.com/openshift/api/config/v1" imagev1 "github.com/openshift/api/image/v1" routev1 "github.com/openshift/api/route/v1" subv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" @@ -27,6 +28,7 @@ import ( appsubV1alpha1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1alpha1" applicationv1beta1 "sigs.k8s.io/application/api/v1beta1" + globalhubv1alpha1 "github.com/stolostron/multicluster-global-hub/operator/api/operator/v1alpha1" globalhubv1alpha4 "github.com/stolostron/multicluster-global-hub/operator/api/operator/v1alpha4" ) @@ -34,6 +36,7 @@ func GetRuntimeScheme() *runtime.Scheme { scheme := runtime.NewScheme() utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(routev1.AddToScheme(scheme)) + utilruntime.Must(configv1.AddToScheme(scheme)) utilruntime.Must(operatorsv1.AddToScheme(scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme)) utilruntime.Must(clusterv1beta1.AddToScheme(scheme)) @@ -41,6 +44,7 @@ func GetRuntimeScheme() *runtime.Scheme { utilruntime.Must(workv1.AddToScheme(scheme)) utilruntime.Must(addonv1alpha1.AddToScheme(scheme)) utilruntime.Must(globalhubv1alpha4.AddToScheme(scheme)) + utilruntime.Must(globalhubv1alpha1.AddToScheme(scheme)) utilruntime.Must(appsubv1.SchemeBuilder.AddToScheme(scheme)) utilruntime.Must(appsubV1alpha1.AddToScheme(scheme)) utilruntime.Must(subv1alpha1.AddToScheme(scheme)) diff --git a/operator/pkg/config/status.go b/operator/pkg/config/status.go index ab75a9d68..15b7e7471 100644 --- a/operator/pkg/config/status.go +++ b/operator/pkg/config/status.go @@ -53,6 +53,7 @@ const ( COMPONENTS_KAFKA_NAME = "kafka" COMPONENTS_POSTGRES_NAME = "multicluster-global-hub-postgresql" COMPONENTS_MANAGER_NAME = "multicluster-global-hub-manager" + COMPONENTS_AGENT_NAME = "multicluster-global-hub-agent" COMPONENTS_GRAFANA_NAME = "multicluster-global-hub-grafana" COMPONENTS_INVENTORY_API_NAME = "inventory-api" ) diff --git a/operator/pkg/controllers/agent/addon_agent.go 
b/operator/pkg/controllers/agent/addon_agent.go index 4b281340d..107217fa0 100644 --- a/operator/pkg/controllers/agent/addon_agent.go +++ b/operator/pkg/controllers/agent/addon_agent.go @@ -145,7 +145,7 @@ func (a *GlobalHubAddonAgent) GetValues(cluster *clusterv1.ManagedCluster, StackroxPollInterval: config.GetStackroxPollInterval(mgh), } - if err := setTransportConfigs(a.ctx, &manifestsConfig, cluster, a.client); err != nil { + if err := setTransportConfigs(&manifestsConfig, cluster, a.client); err != nil { log.Errorw("failed to set transport config", "error", err) return nil, err } diff --git a/operator/pkg/controllers/agent/addon_agent_manifest_transport.go b/operator/pkg/controllers/agent/addon_agent_manifest_transport.go index a76ae954d..3ec238f10 100644 --- a/operator/pkg/controllers/agent/addon_agent_manifest_transport.go +++ b/operator/pkg/controllers/agent/addon_agent_manifest_transport.go @@ -16,7 +16,7 @@ import ( "github.com/stolostron/multicluster-global-hub/pkg/transport" ) -func setTransportConfigs(ctx context.Context, manifestsConfig *config.ManifestsConfig, +func setTransportConfigs(manifestsConfig *config.ManifestsConfig, cluster *clusterv1.ManagedCluster, c client.Client, ) error { if config.EnableInventory() { diff --git a/operator/pkg/controllers/agent/default_agent_controller.go b/operator/pkg/controllers/agent/default_agent_controller.go index f5b4677e1..01a90045e 100644 --- a/operator/pkg/controllers/agent/default_agent_controller.go +++ b/operator/pkg/controllers/agent/default_agent_controller.go @@ -11,7 +11,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" ctrl "sigs.k8s.io/controller-runtime" @@ -146,7 +145,7 @@ func StartDefaultAgentController(initOption config.ControllerOption) (config.Con // 
WatchesRawSource(source.Kind(acmCache, &clusterv1.ManagedCluster{}), // &handler.EnqueueRequestForObject{}, builder.WithPredicates(clusterPred)). // secondary watch for managedclusteraddon - Watches(&v1alpha1.ManagedClusterAddOn{}, + Watches(&addonv1alpha1.ManagedClusterAddOn{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { return []reconcile.Request{ // only trigger the addon reconcile when addon is updated/deleted @@ -156,7 +155,7 @@ func StartDefaultAgentController(initOption config.ControllerOption) (config.Con } }), builder.WithPredicates(mghAddonPred)). // secondary watch for managedclusteraddon - Watches(&v1alpha1.ClusterManagementAddOn{}, + Watches(&addonv1alpha1.ClusterManagementAddOn{}, handler.EnqueueRequestsFromMapFunc(defaultAgentController.renderAllManifestsHandler), builder.WithPredicates(clusterManagementAddonPred)). // secondary watch for transport credentials or image pull secret @@ -222,7 +221,7 @@ func (r *DefaultAgentController) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } - clusterManagementAddOn := &v1alpha1.ClusterManagementAddOn{} + clusterManagementAddOn := &addonv1alpha1.ClusterManagementAddOn{} err = r.Get(ctx, types.NamespacedName{ Name: operatorconstants.GHClusterManagementAddonName, }, clusterManagementAddOn) @@ -297,14 +296,14 @@ func (r *DefaultAgentController) deleteClusterManagementAddon(ctx context.Contex } func (r *DefaultAgentController) reconcileAddonAndResources(ctx context.Context, cluster *clusterv1.ManagedCluster, - cma *v1alpha1.ClusterManagementAddOn, + cma *addonv1alpha1.ClusterManagementAddOn, ) error { expectedAddon, err := expectedManagedClusterAddon(cluster, cma) if err != nil { return err } - existingAddon := &v1alpha1.ManagedClusterAddOn{ + existingAddon := &addonv1alpha1.ManagedClusterAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: constants.GHManagedClusterAddonName, Namespace: cluster.Name, @@ -361,7 
+360,7 @@ func ensureTransportResource(clusterName string) error { func (r *DefaultAgentController) removeResourcesAndAddon(ctx context.Context, cluster *clusterv1.ManagedCluster) error { // should remove the addon first, otherwise it mightn't update the mainfiest work for the addon - existingAddon := &v1alpha1.ManagedClusterAddOn{ + existingAddon := &addonv1alpha1.ManagedClusterAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: constants.GHManagedClusterAddonName, Namespace: cluster.Name, @@ -386,10 +385,10 @@ func (r *DefaultAgentController) removeResourcesAndAddon(ctx context.Context, cl return trans.Prune(cluster.Name) } -func expectedManagedClusterAddon(cluster *clusterv1.ManagedCluster, cma *v1alpha1.ClusterManagementAddOn) ( - *v1alpha1.ManagedClusterAddOn, error, +func expectedManagedClusterAddon(cluster *clusterv1.ManagedCluster, cma *addonv1alpha1.ClusterManagementAddOn) ( + *addonv1alpha1.ManagedClusterAddOn, error, ) { - expectedAddon := &v1alpha1.ManagedClusterAddOn{ + expectedAddon := &addonv1alpha1.ManagedClusterAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: constants.GHManagedClusterAddonName, Namespace: cluster.Name, @@ -406,7 +405,7 @@ func expectedManagedClusterAddon(cluster *clusterv1.ManagedCluster, cma *v1alpha }, }, }, - Spec: v1alpha1.ManagedClusterAddOnSpec{ + Spec: addonv1alpha1.ManagedClusterAddOnSpec{ InstallNamespace: constants.GHAgentNamespace, }, } diff --git a/operator/pkg/controllers/agent/hosted_agent_controller.go b/operator/pkg/controllers/agent/hosted_agent_controller.go index 6cdba56e3..dd0ea3c58 100644 --- a/operator/pkg/controllers/agent/hosted_agent_controller.go +++ b/operator/pkg/controllers/agent/hosted_agent_controller.go @@ -23,7 +23,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -50,7 +49,6 @@ type 
HostedAgentController struct { } var ( - hostedAddonController *HostedAgentController isHostedAgentResourceRemoved = true hostedAgentController *HostedAgentController ) @@ -94,7 +92,7 @@ func NewHostedAgentController(mgr ctrl.Manager) *HostedAgentController { // SetupWithManager sets up the controller with the Manager. func (r *HostedAgentController) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr).Named("AddonsController"). - For(&v1alpha1.ClusterManagementAddOn{}, + For(&addonv1alpha1.ClusterManagementAddOn{}, builder.WithPredicates(addonPred)). // requeue all cma when mgh annotation changed. Watches(&globalhubv1alpha4.MulticlusterGlobalHub{}, @@ -178,7 +176,7 @@ func (r *HostedAgentController) Reconcile(ctx context.Context, req ctrl.Request) } isHostedAgentResourceRemoved = false - cma := &v1alpha1.ClusterManagementAddOn{} + cma := &addonv1alpha1.ClusterManagementAddOn{} err = r.c.Get(ctx, req.NamespacedName, cma) if err != nil { return ctrl.Result{}, err @@ -198,7 +196,7 @@ func (r *HostedAgentController) Reconcile(ctx context.Context, req ctrl.Request) } func (r *HostedAgentController) revertClusterManagementAddon(ctx context.Context) error { - cmaList := &v1alpha1.ClusterManagementAddOnList{} + cmaList := &addonv1alpha1.ClusterManagementAddOnList{} err := r.c.List(ctx, cmaList) if err != nil { return err @@ -215,7 +213,7 @@ func (r *HostedAgentController) revertClusterManagementAddon(ctx context.Context return nil } -func (r *HostedAgentController) removeGlobalhubConfig(ctx context.Context, cma v1alpha1.ClusterManagementAddOn) error { +func (r *HostedAgentController) removeGlobalhubConfig(ctx context.Context, cma addonv1alpha1.ClusterManagementAddOn) error { if len(cma.Spec.InstallStrategy.Placements) == 0 { return nil } @@ -250,7 +248,7 @@ func (r *HostedAgentController) pruneHostedResources(ctx context.Context) error } func (r *HostedAgentController) hasManagedHub(ctx context.Context) (bool, error) { - mcaList := 
&v1alpha1.ManagedClusterAddOnList{} + mcaList := &addonv1alpha1.ManagedClusterAddOnList{} err := r.c.List(ctx, mcaList) if err != nil { return false, err @@ -265,7 +263,7 @@ func (r *HostedAgentController) hasManagedHub(ctx context.Context) (bool, error) } // addAddonConfig add the config to cma, will return true if the cma updated -func addAddonConfig(cma *v1alpha1.ClusterManagementAddOn) bool { +func addAddonConfig(cma *addonv1alpha1.ClusterManagementAddOn) bool { if len(cma.Spec.InstallStrategy.Placements) == 0 { cma.Spec.InstallStrategy.Placements = append(cma.Spec.InstallStrategy.Placements, config.GlobalHubHostedAddonPlacementStrategy) diff --git a/operator/pkg/controllers/agent/hosted_agent_controller_test.go b/operator/pkg/controllers/agent/hosted_agent_controller_test.go index ff6506fd3..2240acc51 100644 --- a/operator/pkg/controllers/agent/hosted_agent_controller_test.go +++ b/operator/pkg/controllers/agent/hosted_agent_controller_test.go @@ -24,7 +24,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" - "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -36,39 +35,39 @@ import ( func TestHostedAgentConfig(t *testing.T) { tests := []struct { name string - cma *v1alpha1.ClusterManagementAddOn - expectCma *v1alpha1.ClusterManagementAddOn + cma *addonv1alpha1.ClusterManagementAddOn + expectCma *addonv1alpha1.ClusterManagementAddOn want bool }{ { name: "empty spec", - cma: &v1alpha1.ClusterManagementAddOn{ + cma: &addonv1alpha1.ClusterManagementAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: "work-manager", Namespace: "c1", }, }, - expectCma: &v1alpha1.ClusterManagementAddOn{ + expectCma: &addonv1alpha1.ClusterManagementAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: "work-manager", Namespace: "c1", }, - Spec: v1alpha1.ClusterManagementAddOnSpec{ - InstallStrategy: 
v1alpha1.InstallStrategy{ + Spec: addonv1alpha1.ClusterManagementAddOnSpec{ + InstallStrategy: addonv1alpha1.InstallStrategy{ Type: "Manual", - Placements: []v1alpha1.PlacementStrategy{ + Placements: []addonv1alpha1.PlacementStrategy{ { - PlacementRef: v1alpha1.PlacementRef{ + PlacementRef: addonv1alpha1.PlacementRef{ Namespace: constants.GHDefaultNamespace, Name: "non-local-cluster", }, - Configs: []v1alpha1.AddOnConfig{ + Configs: []addonv1alpha1.AddOnConfig{ { - ConfigReferent: v1alpha1.ConfigReferent{ + ConfigReferent: addonv1alpha1.ConfigReferent{ Name: "global-hub", Namespace: constants.GHDefaultNamespace, }, - ConfigGroupResource: v1alpha1.ConfigGroupResource{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{ Group: "addon.open-cluster-management.io", Resource: "addondeploymentconfigs", }, @@ -83,17 +82,17 @@ func TestHostedAgentConfig(t *testing.T) { }, { name: "has config in spec", - cma: &v1alpha1.ClusterManagementAddOn{ + cma: &addonv1alpha1.ClusterManagementAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: "work-manager", Namespace: "c1", }, - Spec: v1alpha1.ClusterManagementAddOnSpec{ - InstallStrategy: v1alpha1.InstallStrategy{ + Spec: addonv1alpha1.ClusterManagementAddOnSpec{ + InstallStrategy: addonv1alpha1.InstallStrategy{ Type: "Manual", - Placements: []v1alpha1.PlacementStrategy{ + Placements: []addonv1alpha1.PlacementStrategy{ { - PlacementRef: v1alpha1.PlacementRef{ + PlacementRef: addonv1alpha1.PlacementRef{ Namespace: "ns", Name: "pl", }, @@ -102,33 +101,33 @@ func TestHostedAgentConfig(t *testing.T) { }, }, }, - expectCma: &v1alpha1.ClusterManagementAddOn{ + expectCma: &addonv1alpha1.ClusterManagementAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: "work-manager", Namespace: "c1", }, - Spec: v1alpha1.ClusterManagementAddOnSpec{ - InstallStrategy: v1alpha1.InstallStrategy{ + Spec: addonv1alpha1.ClusterManagementAddOnSpec{ + InstallStrategy: addonv1alpha1.InstallStrategy{ Type: "Manual", - Placements: []v1alpha1.PlacementStrategy{ + Placements: 
[]addonv1alpha1.PlacementStrategy{ { - PlacementRef: v1alpha1.PlacementRef{ + PlacementRef: addonv1alpha1.PlacementRef{ Namespace: "ns", Name: "pl", }, }, { - PlacementRef: v1alpha1.PlacementRef{ + PlacementRef: addonv1alpha1.PlacementRef{ Namespace: constants.GHDefaultNamespace, Name: "non-local-cluster", }, - Configs: []v1alpha1.AddOnConfig{ + Configs: []addonv1alpha1.AddOnConfig{ { - ConfigReferent: v1alpha1.ConfigReferent{ + ConfigReferent: addonv1alpha1.ConfigReferent{ Name: "global-hub", Namespace: constants.GHDefaultNamespace, }, - ConfigGroupResource: v1alpha1.ConfigGroupResource{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{ Group: "addon.open-cluster-management.io", Resource: "addondeploymentconfigs", }, @@ -143,27 +142,27 @@ func TestHostedAgentConfig(t *testing.T) { }, { name: "has needed config in spec", - cma: &v1alpha1.ClusterManagementAddOn{ + cma: &addonv1alpha1.ClusterManagementAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: "work-manager", Namespace: "c1", }, - Spec: v1alpha1.ClusterManagementAddOnSpec{ - InstallStrategy: v1alpha1.InstallStrategy{ + Spec: addonv1alpha1.ClusterManagementAddOnSpec{ + InstallStrategy: addonv1alpha1.InstallStrategy{ Type: "Manual", - Placements: []v1alpha1.PlacementStrategy{ + Placements: []addonv1alpha1.PlacementStrategy{ { - PlacementRef: v1alpha1.PlacementRef{ + PlacementRef: addonv1alpha1.PlacementRef{ Namespace: constants.GHDefaultNamespace, Name: "non-local-cluster", }, - Configs: []v1alpha1.AddOnConfig{ + Configs: []addonv1alpha1.AddOnConfig{ { - ConfigReferent: v1alpha1.ConfigReferent{ + ConfigReferent: addonv1alpha1.ConfigReferent{ Name: "global-hub", Namespace: constants.GHDefaultNamespace, }, - ConfigGroupResource: v1alpha1.ConfigGroupResource{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{ Group: "addon.open-cluster-management.io", Resource: "addondeploymentconfigs", }, @@ -174,27 +173,27 @@ func TestHostedAgentConfig(t *testing.T) { }, }, }, - expectCma: 
&v1alpha1.ClusterManagementAddOn{ + expectCma: &addonv1alpha1.ClusterManagementAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: "work-manager", Namespace: "c1", }, - Spec: v1alpha1.ClusterManagementAddOnSpec{ - InstallStrategy: v1alpha1.InstallStrategy{ + Spec: addonv1alpha1.ClusterManagementAddOnSpec{ + InstallStrategy: addonv1alpha1.InstallStrategy{ Type: "Manual", - Placements: []v1alpha1.PlacementStrategy{ + Placements: []addonv1alpha1.PlacementStrategy{ { - PlacementRef: v1alpha1.PlacementRef{ + PlacementRef: addonv1alpha1.PlacementRef{ Namespace: constants.GHDefaultNamespace, Name: "non-local-cluster", }, - Configs: []v1alpha1.AddOnConfig{ + Configs: []addonv1alpha1.AddOnConfig{ { - ConfigReferent: v1alpha1.ConfigReferent{ + ConfigReferent: addonv1alpha1.ConfigReferent{ Name: "global-hub", Namespace: constants.GHDefaultNamespace, }, - ConfigGroupResource: v1alpha1.ConfigGroupResource{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{ Group: "addon.open-cluster-management.io", Resource: "addondeploymentconfigs", }, @@ -282,7 +281,7 @@ func TestPruneReconciler_revertClusterManagementAddon(t *testing.T) { { name: "cma do not have placements", cmas: []runtime.Object{ - &v1alpha1.ClusterManagementAddOn{ + &addonv1alpha1.ClusterManagementAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: "work-manager", Namespace: utils.GetDefaultNamespace(), @@ -294,7 +293,7 @@ func TestPruneReconciler_revertClusterManagementAddon(t *testing.T) { { name: "cma do not have target placements", cmas: []runtime.Object{ - &v1alpha1.ClusterManagementAddOn{ + &addonv1alpha1.ClusterManagementAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: "work-manager", Namespace: utils.GetDefaultNamespace(), @@ -302,18 +301,18 @@ func TestPruneReconciler_revertClusterManagementAddon(t *testing.T) { Spec: addonv1alpha1.ClusterManagementAddOnSpec{ InstallStrategy: addonv1alpha1.InstallStrategy{ Placements: []addonv1alpha1.PlacementStrategy{ - v1alpha1.PlacementStrategy{ - PlacementRef: v1alpha1.PlacementRef{ + { + 
PlacementRef: addonv1alpha1.PlacementRef{ Namespace: constants.GHDefaultNamespace, Name: "global", }, - Configs: []v1alpha1.AddOnConfig{ + Configs: []addonv1alpha1.AddOnConfig{ { - ConfigReferent: v1alpha1.ConfigReferent{ + ConfigReferent: addonv1alpha1.ConfigReferent{ Name: "global-hub", Namespace: constants.GHDefaultNamespace, }, - ConfigGroupResource: v1alpha1.ConfigGroupResource{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{ Group: "addon.open-cluster-management.io", Resource: "addondeploymentconfigs", }, @@ -330,7 +329,7 @@ func TestPruneReconciler_revertClusterManagementAddon(t *testing.T) { { name: "cma have target placements", cmas: []runtime.Object{ - &v1alpha1.ClusterManagementAddOn{ + &addonv1alpha1.ClusterManagementAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: "work-manager", Namespace: utils.GetDefaultNamespace(), @@ -338,18 +337,18 @@ func TestPruneReconciler_revertClusterManagementAddon(t *testing.T) { Spec: addonv1alpha1.ClusterManagementAddOnSpec{ InstallStrategy: addonv1alpha1.InstallStrategy{ Placements: []addonv1alpha1.PlacementStrategy{ - v1alpha1.PlacementStrategy{ - PlacementRef: v1alpha1.PlacementRef{ + { + PlacementRef: addonv1alpha1.PlacementRef{ Namespace: constants.GHDefaultNamespace, Name: "non-local-cluster", }, - Configs: []v1alpha1.AddOnConfig{ + Configs: []addonv1alpha1.AddOnConfig{ { - ConfigReferent: v1alpha1.ConfigReferent{ + ConfigReferent: addonv1alpha1.ConfigReferent{ Name: "global-hub", Namespace: constants.GHDefaultNamespace, }, - ConfigGroupResource: v1alpha1.ConfigGroupResource{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{ Group: "addon.open-cluster-management.io", Resource: "addondeploymentconfigs", }, @@ -375,7 +374,7 @@ func TestPruneReconciler_revertClusterManagementAddon(t *testing.T) { if err := hac.revertClusterManagementAddon(ctx); (err != nil) != tt.wantErr { t.Errorf("PruneReconciler.revertClusterManagementAddon() error = %v, wantErr %v", err, tt.wantErr) } - cmaList := 
&v1alpha1.ClusterManagementAddOnList{} + cmaList := &addonv1alpha1.ClusterManagementAddOnList{} err := hac.c.List(ctx, cmaList) if err != nil { diff --git a/operator/pkg/controllers/agent/manifests/standalone-agent/clusterrole.yaml b/operator/pkg/controllers/agent/manifests/standalone-agent/clusterrole.yaml new file mode 100644 index 000000000..b36892246 --- /dev/null +++ b/operator/pkg/controllers/agent/manifests/standalone-agent/clusterrole.yaml @@ -0,0 +1,126 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multicluster-global-hub:multicluster-global-hub-agent +rules: +- apiGroups: + - "policy.open-cluster-management.io" + resources: + - placementbindings + - policies + - policyautomations + - policysets + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.open-cluster-management.io + resources: + - placements + - managedclustersets + - managedclustersetbindings + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.open-cluster-management.io + resources: + - managedclusters + - managedclusters/finalizers + - placementdecisions + - placementdecisions/finalizers + - placements + - placements/finalizers + verbs: + - get + - list + - watch + - update +- apiGroups: + - cluster.open-cluster-management.io + resources: + - clusterclaims + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - namespaces + - pods + - configmaps + - events + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch +- apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - list + - watch + - get +- apiGroups: + - config.openshift.io + resources: + - clusterversions + verbs: + - get + - list + - watch +- apiGroups: + - internal.open-cluster-management.io + resources: + - 
managedclusterinfos + verbs: + - get + - list + - watch + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - apps.open-cluster-management.io + resources: + - placementrules + - subscriptionreports + verbs: + - get + - list + - patch + - update + - watch diff --git a/operator/pkg/controllers/agent/manifests/standalone-agent/clusterrolebinding.yaml b/operator/pkg/controllers/agent/manifests/standalone-agent/clusterrolebinding.yaml new file mode 100644 index 000000000..ced1504b7 --- /dev/null +++ b/operator/pkg/controllers/agent/manifests/standalone-agent/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multicluster-global-hub:multicluster-global-hub-agent +subjects: +- kind: ServiceAccount + name: multicluster-global-hub-agent + namespace: {{.Namespace}} +roleRef: + kind: ClusterRole + name: multicluster-global-hub:multicluster-global-hub-agent + apiGroup: rbac.authorization.k8s.io diff --git a/operator/pkg/controllers/agent/manifests/standalone-agent/configmap.yaml b/operator/pkg/controllers/agent/manifests/standalone-agent/configmap.yaml new file mode 100644 index 000000000..533912bc4 --- /dev/null +++ b/operator/pkg/controllers/agent/manifests/standalone-agent/configmap.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: multicluster-global-hub-agent-config + namespace: {{.Namespace}} +data: + managedClusters: "5s" + policies: "5s" + hubClusterInfo: "60s" + hubClusterHeartbeat: "60s" + aggregationLevel: full + enableLocalPolicies: "true" + logLevel: "info" diff --git a/operator/pkg/controllers/agent/manifests/standalone-agent/deployment.yaml b/operator/pkg/controllers/agent/manifests/standalone-agent/deployment.yaml new file mode 100644 index 000000000..e564b09bc --- /dev/null +++ b/operator/pkg/controllers/agent/manifests/standalone-agent/deployment.yaml @@ 
-0,0 +1,69 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: multicluster-global-hub-agent + namespace: {{.Namespace}} +spec: + replicas: 1 + selector: + matchLabels: + name: multicluster-global-hub-agent + template: + metadata: + labels: + name: multicluster-global-hub-agent + spec: + serviceAccountName: multicluster-global-hub-agent + containers: + - name: multicluster-global-hub-agent + image: {{ .Image }} + {{- if .Resources.Requests }} + resources: + requests: + {{- range $key, $value := .Resources.Requests }} + {{$key}}: {{.ToUnstructured}} + {{- end }} + {{- end }} + imagePullPolicy: {{ .ImagePullPolicy }} + args: + - --pod-namespace=$(POD_NAMESPACE) + - --leaf-hub-name={{ .ClusterId }} + - --lease-duration={{.LeaseDuration}} + - --renew-deadline={{.RenewDeadline}} + - --retry-period={{.RetryPeriod}} + - --qps={{.AgentQPS}} + - --burst={{.AgentBurst}} + - --standalone=true + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + {{- if .ImagePullSecret }} + imagePullSecrets: + - name: {{ .ImagePullSecret }} + {{- end }} + {{- if .NodeSelector }} + nodeSelector: + {{- range $key, $value := .NodeSelector}} + "{{$key}}": "{{$value}}" + {{- end}} + {{- end }} + {{- if .Tolerations }} + tolerations: + {{- range .Tolerations}} + - key: "{{.Key}}" + operator: "{{.Operator}}" + value: "{{.Value}}" + effect: "{{.Effect}}" + {{- if .TolerationSeconds}} + tolerationSeconds: {{.TolerationSeconds}} + {{- end}} + {{- end}} + {{- end }} diff --git a/operator/pkg/controllers/agent/manifests/standalone-agent/serviceaccount.yaml b/operator/pkg/controllers/agent/manifests/standalone-agent/serviceaccount.yaml new file mode 100644 index 000000000..81c08a5e4 --- /dev/null +++ b/operator/pkg/controllers/agent/manifests/standalone-agent/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + 
name: multicluster-global-hub-agent + namespace: {{.Namespace}} diff --git a/operator/pkg/controllers/agent/standalone_agent_controller.go b/operator/pkg/controllers/agent/standalone_agent_controller.go new file mode 100644 index 000000000..15964954b --- /dev/null +++ b/operator/pkg/controllers/agent/standalone_agent_controller.go @@ -0,0 +1,202 @@ +package agent + +import ( + "context" + "embed" + "fmt" + "strconv" + + configv1 "github.com/openshift/api/config/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/discovery" + "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/restmapper" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/stolostron/multicluster-global-hub/operator/api/operator/v1alpha1" + "github.com/stolostron/multicluster-global-hub/operator/pkg/config" + operatorconstants "github.com/stolostron/multicluster-global-hub/operator/pkg/constants" + "github.com/stolostron/multicluster-global-hub/operator/pkg/deployer" + "github.com/stolostron/multicluster-global-hub/operator/pkg/renderer" + "github.com/stolostron/multicluster-global-hub/operator/pkg/utils" + commonutils "github.com/stolostron/multicluster-global-hub/pkg/utils" +) + +var ( + standaloneAgentStarted = false + //go:embed manifests/standalone-agent + fs embed.FS +) + +type StandaloneAgentController struct { + ctrl.Manager +} + +var deplomentPred = predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return e.Object.GetNamespace() == commonutils.GetDefaultNamespace() && + e.Object.GetName() == config.COMPONENTS_AGENT_NAME + }, + UpdateFunc: func(e 
event.UpdateEvent) bool { + return e.ObjectNew.GetNamespace() == commonutils.GetDefaultNamespace() && + e.ObjectNew.GetName() == config.COMPONENTS_AGENT_NAME + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return e.Object.GetNamespace() == commonutils.GetDefaultNamespace() && + e.Object.GetName() == config.COMPONENTS_AGENT_NAME + }, +} + +func StartStandaloneAgentController(ctx context.Context, mgr ctrl.Manager) error { + if standaloneAgentStarted { + return nil + } + agentReconciler := &StandaloneAgentController{ + Manager: mgr, + } + + err := ctrl.NewControllerManagedBy(mgr). + Named("standalone-agent-reconciler"). + Watches(&v1alpha1.MulticlusterGlobalHubAgent{}, + &handler.EnqueueRequestForObject{}). + Watches(&appsv1.Deployment{}, + &handler.EnqueueRequestForObject{}, builder.WithPredicates(deplomentPred)). + Watches(&corev1.ConfigMap{}, + &handler.EnqueueRequestForObject{}, builder.WithPredicates(config.GeneralPredicate)). + Watches(&corev1.ServiceAccount{}, + &handler.EnqueueRequestForObject{}, builder.WithPredicates(config.GeneralPredicate)). + Watches(&rbacv1.ClusterRole{}, + &handler.EnqueueRequestForObject{}, builder.WithPredicates(config.GeneralPredicate)). + Watches(&rbacv1.ClusterRoleBinding{}, + &handler.EnqueueRequestForObject{}, builder.WithPredicates(config.GeneralPredicate)). 
+ Complete(agentReconciler) + if err != nil { + return err + } + standaloneAgentStarted = true + + // trigger the reconciler at the beginning to apply resources + if _, err := agentReconciler.Reconcile(ctx, reconcile.Request{}); err != nil { + log.Error(err) + } + return nil +} + +// +kubebuilder:rbac:groups=operator.open-cluster-management.io,resources=multiclusterglobalhubagents,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=operator.open-cluster-management.io,resources=multiclusterglobalhubagents/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=operator.open-cluster-management.io,resources=multiclusterglobalhubagents/finalizers,verbs=update +// +kubebuilder:rbac:groups="config.openshift.io",resources=infrastructures;clusterversions,verbs=get;list;watch +// +kubebuilder:rbac:groups="policy.open-cluster-management.io",resources=policyautomations;policysets;placementbindings;policies,verbs=get;list;watch;patch;update +// +kubebuilder:rbac:groups="cluster.open-cluster-management.io",resources=placements;managedclustersets;managedclustersetbindings,verbs=get;list;watch;patch;update +// +kubebuilder:rbac:groups="cluster.open-cluster-management.io",resources=managedclusters;managedclusters/finalizers;placementdecisions;placementdecisions/finalizers;placements;placements/finalizers,verbs=get;list;watch;patch;update +// +kubebuilder:rbac:groups="cluster.open-cluster-management.io",resources=clusterclaims,verbs=create;get;list;watch;patch;update;delete +// +kubebuilder:rbac:groups="",resources=namespaces;pods;configmaps;events;secrets,verbs=create;get;list;watch;patch;update;delete +// +kubebuilder:rbac:groups="apiextensions.k8s.io",resources=customresourcedefinitions,verbs=list;watch +// +kubebuilder:rbac:groups="route.openshift.io",resources=routes,verbs=get;list;watch +// +kubebuilder:rbac:groups="internal.open-cluster-management.io",resources=managedclusterinfos,verbs=get;list;watch;update +// 
+kubebuilder:rbac:groups="apps.open-cluster-management.io",resources=placementrules;subscriptionreports,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups="coordination.k8s.io",resources=leases,verbs=create;get;list;watch;patch;update + +func (s *StandaloneAgentController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + mgha, err := config.GetMulticlusterGlobalHubAgent(ctx, s.Manager.GetClient()) + if err != nil { + if !errors.IsNotFound(err) { + return ctrl.Result{}, err + } else { + return ctrl.Result{}, nil + } + } + + if config.IsAgentPaused(mgha) || mgha.DeletionTimestamp != nil { + return ctrl.Result{}, nil + } + // create new HoHRenderer and HoHDeployer + hohRenderer, hohDeployer := renderer.NewHoHRenderer(fs), deployer.NewHoHDeployer(s.GetClient()) + + // create discovery client + dc, err := discovery.NewDiscoveryClientForConfig(s.Manager.GetConfig()) + if err != nil { + return ctrl.Result{}, err + } + + imagePullPolicy := corev1.PullAlways + if mgha.Spec.ImagePullPolicy != "" { + imagePullPolicy = mgha.Spec.ImagePullPolicy + } + agentQPS, agentBurst := config.GetAgentRestConfig() + + // set resource requirements + resourceReq := corev1.ResourceRequirements{} + requests := corev1.ResourceList{ + corev1.ResourceName(corev1.ResourceMemory): resource.MustParse(operatorconstants.AgentMemoryRequest), + corev1.ResourceName(corev1.ResourceCPU): resource.MustParse(operatorconstants.AgentCPURequest), + } + utils.SetResourcesFromCR(mgha.Spec.Resources, requests) + resourceReq.Requests = requests + + electionConfig, err := config.GetElectionConfig() + if err != nil { + log.Errorw("failed to get election config", "error", err) + return ctrl.Result{}, err + } + + infra := &configv1.Infrastructure{} + namespacedName := types.NamespacedName{Name: "cluster"} + err = s.Manager.GetClient().Get(ctx, namespacedName, infra) + if err != nil { + return ctrl.Result{}, err + } + + // create restmapper for deployer to find GVR + mapper := 
restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(dc)) + + agentObjects, err := hohRenderer.Render("manifests/standalone-agent", "", func(profile string) (interface{}, error) { + return struct { + Image string + ImagePullSecret string + ImagePullPolicy string + Namespace string + NodeSelector map[string]string + Tolerations []corev1.Toleration + LeaseDuration string + RenewDeadline string + RetryPeriod string + AgentQPS float32 + AgentBurst int + LogLevel string + ClusterId string + Resources *corev1.ResourceRequirements + }{ + Image: config.GetImage(config.GlobalHubAgentImageKey), + ImagePullSecret: mgha.Spec.ImagePullSecret, + ImagePullPolicy: string(imagePullPolicy), + Namespace: mgha.Namespace, + NodeSelector: mgha.Spec.NodeSelector, + Tolerations: mgha.Spec.Tolerations, + LeaseDuration: strconv.Itoa(electionConfig.LeaseDuration), + RenewDeadline: strconv.Itoa(electionConfig.RenewDeadline), + RetryPeriod: strconv.Itoa(electionConfig.RetryPeriod), + AgentQPS: agentQPS, + AgentBurst: agentBurst, + ClusterId: string(infra.GetUID()), + Resources: &resourceReq, + }, nil + }) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to render standalone agent objects: %v", err) + } + if err = utils.ManipulateGlobalHubObjects(agentObjects, mgha, hohDeployer, mapper, s.GetScheme()); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create/update standalone agent objects: %v", err) + } + return ctrl.Result{}, nil +} diff --git a/operator/pkg/controllers/manager/manifests/global-hub-kafka-topic.yaml b/operator/pkg/controllers/manager/manifests/global-hub-kafka-topic.yaml deleted file mode 100644 index e69de29bb..000000000 diff --git a/operator/pkg/controllers/manager/manifests/global-hub-kafka-user.yaml b/operator/pkg/controllers/manager/manifests/global-hub-kafka-user.yaml deleted file mode 100644 index e69de29bb..000000000 diff --git a/operator/pkg/controllers/meta.go b/operator/pkg/controllers/meta.go index 2a3c91ecc..7d9d22f71 100644 --- 
a/operator/pkg/controllers/meta.go +++ b/operator/pkg/controllers/meta.go @@ -32,8 +32,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" + "github.com/stolostron/multicluster-global-hub/operator/api/operator/v1alpha1" "github.com/stolostron/multicluster-global-hub/operator/api/operator/v1alpha4" "github.com/stolostron/multicluster-global-hub/operator/pkg/config" "github.com/stolostron/multicluster-global-hub/operator/pkg/controllers/acm" @@ -78,23 +80,39 @@ type MetaController struct { imageClient *imagev1client.ImageV1Client mgr manager.Manager operatorConfig *config.OperatorConfig - upgraded bool startedControllerMap map[string]config.ControllerInterface } -// +kubebuilder:rbac:groups=operator.open-cluster-management.io,resources=multiclusterglobalhubs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=operator.open-cluster-management.io,resources=multiclusterglobalhubs;multiclusterglobalhubagents,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=operator.open-cluster-management.io,resources=multiclusterglobalhubs/status,verbs=get;update;patch // +kubebuilder:rbac:groups=operator.open-cluster-management.io,resources=multiclusterglobalhubs/finalizers,verbs=update -// +kubebuilder:rbac:groups=policy.open-cluster-management.io,resources=policies,verbs=get;list;patch;update - -func (r *MetaController) Reconcile(ctx context.Context, req ctrl.Request, -) (ctrl.Result, error) { +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch;create;update;delete +// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;delete +// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch;create;update;delete +// 
+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete;patch +// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete +// +kubebuilder:rbac:groups="apps",resources=deployments,verbs=get;list;watch;create;update;delete +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=roles;rolebindings;clusterroles;clusterrolebindings,verbs=get;list;watch;create;update;delete + +func (r *MetaController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { // Check if mgh exist or deleting mgh, err := config.GetMulticlusterGlobalHub(ctx, r.client) if err != nil { return ctrl.Result{}, err } + mgha, err := config.GetMulticlusterGlobalHubAgent(ctx, r.client) + if err != nil { + return ctrl.Result{}, err + } + if mgha != nil { + // deploy global hub agent + if err := agent.StartStandaloneAgentController(ctx, r.mgr); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + if mgh == nil { return ctrl.Result{}, nil } @@ -114,6 +132,10 @@ func (r *MetaController) Reconcile(ctx context.Context, req ctrl.Request, Reason: config.CONDITION_REASON_GLOBALHUB_UNINSTALL, Message: config.CONDITION_MESSAGE_GLOBALHUB_UNINSTALL, }, v1alpha4.GlobalHubUninstalling) + if err != nil { + return ctrl.Result{}, err + } + _, err = r.pruneGlobalHubResources(ctx) if err != nil { return ctrl.Result{}, err @@ -214,6 +236,8 @@ func (r *MetaController) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr).Named("MetaController"). For(&v1alpha4.MulticlusterGlobalHub{}, builder.WithPredicates(config.MGHPred)). + Watches(&v1alpha1.MulticlusterGlobalHubAgent{}, + &handler.EnqueueRequestForObject{}). 
Complete(r) } diff --git a/operator/pkg/utils/utils.go b/operator/pkg/utils/utils.go index 21131a445..6a2845e69 100644 --- a/operator/pkg/utils/utils.go +++ b/operator/pkg/utils/utils.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -45,6 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "github.com/stolostron/multicluster-global-hub/operator/api/operator/shared" "github.com/stolostron/multicluster-global-hub/operator/api/operator/v1alpha4" "github.com/stolostron/multicluster-global-hub/operator/pkg/config" operatorconstants "github.com/stolostron/multicluster-global-hub/operator/pkg/constants" @@ -289,42 +291,41 @@ func GetResources(component string, advanced *v1alpha4.AdvancedSpec) *corev1.Res requests[corev1.ResourceName(corev1.ResourceMemory)] = resource.MustParse(operatorconstants.GrafanaMemoryRequest) requests[corev1.ResourceName(corev1.ResourceCPU)] = resource.MustParse(operatorconstants.GrafanaCPURequest) if advanced != nil && advanced.Grafana != nil { - setResourcesFromCR(advanced.Grafana.Resources, requests) + SetResourcesFromCR(advanced.Grafana.Resources, requests) } case operatorconstants.Postgres: requests[corev1.ResourceName(corev1.ResourceMemory)] = resource.MustParse(operatorconstants.PostgresMemoryRequest) requests[corev1.ResourceName(corev1.ResourceCPU)] = resource.MustParse(operatorconstants.PostgresCPURequest) if advanced != nil && advanced.Postgres != nil { - setResourcesFromCR(advanced.Postgres.Resources, requests) + SetResourcesFromCR(advanced.Postgres.Resources, requests) } case operatorconstants.Manager: requests[corev1.ResourceName(corev1.ResourceMemory)] = 
resource.MustParse(operatorconstants.ManagerMemoryRequest) requests[corev1.ResourceName(corev1.ResourceCPU)] = resource.MustParse(operatorconstants.ManagerCPURequest) if advanced != nil && advanced.Manager != nil { - setResourcesFromCR(advanced.Manager.Resources, requests) + SetResourcesFromCR(advanced.Manager.Resources, requests) } case operatorconstants.Agent: requests[corev1.ResourceName(corev1.ResourceMemory)] = resource.MustParse(operatorconstants.AgentMemoryRequest) requests[corev1.ResourceName(corev1.ResourceCPU)] = resource.MustParse(operatorconstants.AgentCPURequest) if advanced != nil && advanced.Agent != nil { - setResourcesFromCR(advanced.Agent.Resources, requests) + SetResourcesFromCR(advanced.Agent.Resources, requests) } case operatorconstants.Kafka: requests[corev1.ResourceName(corev1.ResourceMemory)] = resource.MustParse(operatorconstants.KafkaMemoryRequest) requests[corev1.ResourceName(corev1.ResourceCPU)] = resource.MustParse(operatorconstants.KafkaCPURequest) if advanced != nil && advanced.Kafka != nil { - setResourcesFromCR(advanced.Kafka.Resources, requests) + SetResourcesFromCR(advanced.Kafka.Resources, requests) } } - resourceReq.Requests = requests return &resourceReq } -func setResourcesFromCR(res *v1alpha4.ResourceRequirements, requests corev1.ResourceList) { +func SetResourcesFromCR(res *shared.ResourceRequirements, requests corev1.ResourceList) { if res != nil { if res.Requests.Memory().String() != "0" { requests[corev1.ResourceName(corev1.ResourceMemory)] = resource.MustParse(res.Requests.Memory().String()) @@ -432,7 +433,7 @@ func FilterManagedCluster(obj client.Object) bool { // ManipulateGlobalHubObjects will attach the owner reference, add specific labels to these objects func ManipulateGlobalHubObjects(objects []*unstructured.Unstructured, - mgh *v1alpha4.MulticlusterGlobalHub, hohDeployer deployer.Deployer, + owner metav1.Object, hohDeployer deployer.Deployer, mapper *restmapper.DeferredDiscoveryRESTMapper, scheme *runtime.Scheme, 
) error { // manipulate the object @@ -444,7 +445,7 @@ func ManipulateGlobalHubObjects(objects []*unstructured.Unstructured, if mapping.Scope.Name() == meta.RESTScopeNameNamespace { // for namespaced resource, set ownerreference of controller - if err := controllerutil.SetControllerReference(mgh, obj, scheme); err != nil { + if err := controllerutil.SetControllerReference(owner, obj, scheme); err != nil { return err } } diff --git a/operator/pkg/utils/utils_test.go b/operator/pkg/utils/utils_test.go index 2189501db..ad75c2c17 100644 --- a/operator/pkg/utils/utils_test.go +++ b/operator/pkg/utils/utils_test.go @@ -33,6 +33,7 @@ import ( clusterv1 "open-cluster-management.io/api/cluster/v1" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "github.com/stolostron/multicluster-global-hub/operator/api/operator/shared" "github.com/stolostron/multicluster-global-hub/operator/api/operator/v1alpha4" "github.com/stolostron/multicluster-global-hub/operator/pkg/constants" commonconstants "github.com/stolostron/multicluster-global-hub/pkg/constants" @@ -281,7 +282,7 @@ func Test_GetResources(t *testing.T) { tests := []struct { name string component string - advanced func(resReq *v1alpha4.ResourceRequirements) *v1alpha4.AdvancedSpec + advanced func(resReq *shared.ResourceRequirements) *v1alpha4.AdvancedSpec cpuRequest string memoryRequest string custom bool @@ -295,7 +296,7 @@ func Test_GetResources(t *testing.T) { { name: "Test Grafana with customized values", component: constants.Grafana, - advanced: func(resReq *v1alpha4.ResourceRequirements) *v1alpha4.AdvancedSpec { + advanced: func(resReq *shared.ResourceRequirements) *v1alpha4.AdvancedSpec { return &v1alpha4.AdvancedSpec{ Grafana: &v1alpha4.CommonSpec{ Resources: resReq, @@ -313,7 +314,7 @@ func Test_GetResources(t *testing.T) { { name: "Test Postgres with customized values", component: constants.Postgres, - advanced: func(resReq *v1alpha4.ResourceRequirements) *v1alpha4.AdvancedSpec { + advanced: func(resReq 
*shared.ResourceRequirements) *v1alpha4.AdvancedSpec { return &v1alpha4.AdvancedSpec{ Postgres: &v1alpha4.CommonSpec{ Resources: resReq, @@ -331,7 +332,7 @@ func Test_GetResources(t *testing.T) { { name: "Test Agent with customized values", component: constants.Agent, - advanced: func(resReq *v1alpha4.ResourceRequirements) *v1alpha4.AdvancedSpec { + advanced: func(resReq *shared.ResourceRequirements) *v1alpha4.AdvancedSpec { return &v1alpha4.AdvancedSpec{ Agent: &v1alpha4.CommonSpec{ Resources: resReq, @@ -349,7 +350,7 @@ func Test_GetResources(t *testing.T) { { name: "Test Manager with customized values", component: constants.Manager, - advanced: func(resReq *v1alpha4.ResourceRequirements) *v1alpha4.AdvancedSpec { + advanced: func(resReq *shared.ResourceRequirements) *v1alpha4.AdvancedSpec { return &v1alpha4.AdvancedSpec{ Manager: &v1alpha4.CommonSpec{ Resources: resReq, @@ -367,7 +368,7 @@ func Test_GetResources(t *testing.T) { { name: "Test Kafka with customized values", component: constants.Kafka, - advanced: func(resReq *v1alpha4.ResourceRequirements) *v1alpha4.AdvancedSpec { + advanced: func(resReq *shared.ResourceRequirements) *v1alpha4.AdvancedSpec { return &v1alpha4.AdvancedSpec{ Kafka: &v1alpha4.CommonSpec{ Resources: resReq, @@ -378,7 +379,7 @@ func Test_GetResources(t *testing.T) { }, } - resReq := &v1alpha4.ResourceRequirements{ + resReq := &shared.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceName(corev1.ResourceMemory): resource.MustParse(customMemoryRequest), corev1.ResourceName(corev1.ResourceCPU): resource.MustParse(customCPURequest), diff --git a/test/integration/operator/controllers/agent/cluster_default_addon_test.go b/test/integration/operator/controllers/agent/cluster_default_addon_test.go index c47e406a2..9378f111d 100644 --- a/test/integration/operator/controllers/agent/cluster_default_addon_test.go +++ b/test/integration/operator/controllers/agent/cluster_default_addon_test.go @@ -30,7 +30,7 @@ var _ = 
Describe("deploy default addon", func() { []clusterv1.ManagedClusterClaim{}, clusterAvailableCondition) - By("By checking the addon CR is is created in the cluster ns") + By("By checking the addon CR is created in the cluster ns") addon := &addonv1alpha1.ManagedClusterAddOn{} Eventually(func() error { return runtimeClient.Get(ctx, types.NamespacedName{ diff --git a/test/integration/operator/controllers/agent/standalone_agent/standalone_agent_test.go b/test/integration/operator/controllers/agent/standalone_agent/standalone_agent_test.go new file mode 100644 index 000000000..95428081d --- /dev/null +++ b/test/integration/operator/controllers/agent/standalone_agent/standalone_agent_test.go @@ -0,0 +1,98 @@ +package agent + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + configv1 "github.com/openshift/api/config/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + globalhubv1alpha1 "github.com/stolostron/multicluster-global-hub/operator/api/operator/v1alpha1" +) + +// go test ./test/integration/operator/controllers/agent/standalone_agent -ginkgo.focus "standalone agent" -v +var _ = Describe("standalone agent", func() { + It("Should create standalone agent in the default namespace", func() { + By("Creating multiclusterglobalhubagent CR") + mgha := &globalhubv1alpha1.MulticlusterGlobalHubAgent{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multiclusterglobalhubagent", + Namespace: "default", + }, + Spec: globalhubv1alpha1.MulticlusterGlobalHubAgentSpec{ + ImagePullSecret: "test-pull-secret", + TransportConfigSecretName: "transport-secret", + }, + } + Expect(runtimeClient.Create(ctx, mgha)).Should(Succeed()) + + By("Creating OpenShift Infrastructure CR") + infra := &configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + } + Expect(runtimeClient.Create(ctx, infra)).Should(Succeed()) + + 
By("By checking the GH agent is created in default namespace") + agentDeployment := &appsv1.Deployment{} + Eventually(func() error { + return runtimeClient.Get(ctx, types.NamespacedName{ + Name: "multicluster-global-hub-agent", + Namespace: "default", + }, agentDeployment) + }, time.Second*10, time.Second*1).ShouldNot(HaveOccurred()) + + By("By checking the GH agent serviceaccount is created in default namespace") + agentSA := &corev1.ServiceAccount{} + Eventually(func() error { + return runtimeClient.Get(ctx, types.NamespacedName{ + Name: "multicluster-global-hub-agent", + Namespace: "default", + }, agentSA) + }, time.Second*10, time.Second*1).ShouldNot(HaveOccurred()) + + By("By checking the GH agent configmap is created in default namespace") + agentCM := &corev1.ConfigMap{} + Eventually(func() error { + return runtimeClient.Get(ctx, types.NamespacedName{ + Name: "multicluster-global-hub-agent-config", + Namespace: "default", + }, agentCM) + }, time.Second*10, time.Second*1).ShouldNot(HaveOccurred()) + + By("By checking the GH agent clusterrole is created in default namespace") + agentClusterRole := &rbacv1.ClusterRole{} + Eventually(func() error { + return runtimeClient.Get(ctx, types.NamespacedName{ + Name: "multicluster-global-hub:multicluster-global-hub-agent", + }, agentClusterRole) + }, time.Second*10, time.Second*1).ShouldNot(HaveOccurred()) + + By("By checking the GH agent clusterrolebinding is created in default namespace") + agentClusterRoleBinding := &rbacv1.ClusterRoleBinding{} + Eventually(func() error { + return runtimeClient.Get(ctx, types.NamespacedName{ + Name: "multicluster-global-hub:multicluster-global-hub-agent", + }, agentClusterRoleBinding) + }, time.Second*10, time.Second*1).ShouldNot(HaveOccurred()) + + By("Removed the clusterrolebinding") + originClusterRoleBindingId := agentClusterRoleBinding.GetUID() + Expect(runtimeClient.Delete(ctx, agentClusterRoleBinding)).Should(Succeed()) + + By("By checking the GH agent clusterrolebinding is 
re-created in default namespace") + agentClusterRoleBinding = &rbacv1.ClusterRoleBinding{} + Eventually(func() bool { + runtimeClient.Get(ctx, types.NamespacedName{ + Name: "multicluster-global-hub:multicluster-global-hub-agent", + }, agentClusterRoleBinding) + return agentClusterRoleBinding.GetUID() != originClusterRoleBindingId + }, time.Second*10, time.Second*1).Should(BeTrue()) + }) +}) diff --git a/test/integration/operator/controllers/agent/standalone_agent/suite_test.go b/test/integration/operator/controllers/agent/standalone_agent/suite_test.go new file mode 100644 index 000000000..9116e425c --- /dev/null +++ b/test/integration/operator/controllers/agent/standalone_agent/suite_test.go @@ -0,0 +1,116 @@ +/* +Copyright 2022. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package agent + +import ( + "context" + "os" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + "github.com/stolostron/multicluster-global-hub/operator/pkg/config" + "github.com/stolostron/multicluster-global-hub/operator/pkg/controllers/agent" +) + +// These tests use Ginkgo (BDD-style Go testing framework). 
Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var ( + cfg *rest.Config + runtimeClient client.Client // You'll be using this client in your tests. + testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc +) + +func TestControllers(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Standalone Agent Controller Integration Suite") +} + +var _ = BeforeSuite(func() { + Expect(os.Setenv("POD_NAMESPACE", "default")).To(Succeed()) + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDInstallOptions: envtest.CRDInstallOptions{ + Paths: []string{ + filepath.Join("..", "..", "..", "..", "..", "..", "operator", "config", "crd", "bases"), + filepath.Join("..", "..", "..", "..", "..", "manifest", "crd"), + }, + MaxTime: 1 * time.Minute, + }, + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + // add scheme + runtimeScheme := config.GetRuntimeScheme() + runtimeClient, err = client.New(cfg, client.Options{Scheme: runtimeScheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(runtimeClient).NotTo(BeNil()) + + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Metrics: metricsserver.Options{ + BindAddress: "0", // disable the metrics serving + }, + Scheme: runtimeScheme, + NewCache: config.InitCache, + LeaderElection: false, + }) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sManager).ToNot(BeNil()) + + By("start the standalone agent controller to manager") + Expect(agent.StartStandaloneAgentController(ctx, k8sManager)).ToNot(HaveOccurred()) + + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() + Expect(k8sManager.GetCache().WaitForCacheSync(ctx)).To(BeTrue()) +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + // https://github.com/kubernetes-sigs/controller-runtime/issues/1571 + // Set 4 with random + if err != nil { + time.Sleep(4 * time.Second) + } + err = testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/test/integration/operator/controllers/transporter_test.go b/test/integration/operator/controllers/transporter_test.go index 1ba1adddd..cbc6f4bd3 100644 --- a/test/integration/operator/controllers/transporter_test.go +++ b/test/integration/operator/controllers/transporter_test.go @@ -22,6 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/stolostron/multicluster-global-hub/operator/api/operator/shared" "github.com/stolostron/multicluster-global-hub/operator/api/operator/v1alpha4" "github.com/stolostron/multicluster-global-hub/operator/pkg/config" operatortrans 
"github.com/stolostron/multicluster-global-hub/operator/pkg/controllers/transporter" @@ -247,7 +248,7 @@ var _ = Describe("transporter", Ordered, func() { customMemoryRequest := "1Mi" mgh.Spec.AdvancedSpec = &v1alpha4.AdvancedSpec{ Kafka: &v1alpha4.CommonSpec{ - Resources: &v1alpha4.ResourceRequirements{ + Resources: &shared.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceName(corev1.ResourceMemory): resource.MustParse(customMemoryRequest), corev1.ResourceName(corev1.ResourceCPU): resource.MustParse(customCPURequest), diff --git a/test/manifest/crd/0000_03_config.openshift.io_infrastructures.crd.yaml b/test/manifest/crd/0000_03_config.openshift.io_infrastructures.crd.yaml new file mode 100644 index 000000000..46446f94f --- /dev/null +++ b/test/manifest/crd/0000_03_config.openshift.io_infrastructures.crd.yaml @@ -0,0 +1,1858 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: Default + name: infrastructures.config.openshift.io +spec: + conversion: + strategy: None + group: config.openshift.io + names: + kind: Infrastructure + listKind: InfrastructureList + plural: infrastructures + singular: infrastructure + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Infrastructure holds cluster-wide information about Infrastructure. + \ The canonical name is `cluster` \n Compatibility level 1: Stable within + a major release for a minimum of 12 months or 3 minor releases (whichever + is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + cloudConfig: + description: "cloudConfig is a reference to a ConfigMap containing + the cloud provider configuration file. This configuration file is + used to configure the Kubernetes cloud provider integration when + using the built-in cloud provider integration or the external cloud + controller manager. The namespace for this config map is openshift-config. + \n cloudConfig should only be consumed by the kube_cloud_config + controller. The controller is responsible for using the user configuration + in the spec for various platforms and combining that with the user + provided ConfigMap in this field to create a stitched kube cloud + config. The controller generates a ConfigMap `kube-cloud-config` + in `openshift-config-managed` namespace with the kube cloud config + is stored in `cloud.conf` key. All the clients are expected to use + the generated ConfigMap only." + properties: + key: + description: Key allows pointing to a specific key/value inside + of the configmap. This is useful for logical file references. + type: string + name: + type: string + type: object + platformSpec: + description: platformSpec holds desired information specific to the + underlying infrastructure provider. 
+ properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba + Cloud infrastructure provider. + type: object + aws: + description: AWS contains settings specific to the Amazon Web + Services infrastructure provider. + properties: + serviceEndpoints: + description: serviceEndpoints list contains custom endpoints + which will override default service endpoint of AWS Services. + There must be only one ServiceEndpoint for a service. + items: + description: AWSServiceEndpoint store the configuration + of a custom url to override existing defaults of AWS Services. + properties: + name: + description: name is the name of the AWS service. The + list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html + This must be provided and cannot be empty. + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with scheme + https, that overrides the default generated endpoint + for a client. This must be provided and cannot be + empty. + pattern: ^https:// + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + azure: + description: Azure contains settings specific to the Azure infrastructure + provider. + type: object + baremetal: + description: BareMetal contains settings specific to the BareMetal + platform. + type: object + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix + Metal infrastructure provider. + type: object + external: + description: ExternalPlatformType represents generic infrastructure + provider. Platform-specific components should be supplemented + separately. + properties: + platformName: + default: Unknown + description: PlatformName holds the arbitrary string representing + the infrastructure provider name, expected to be set at + the installation time. 
This field is solely for informational + and reporting purposes and is not expected to be used for + decision-making. + type: string + x-kubernetes-validations: + - message: platform name cannot be changed once set + rule: oldSelf == 'Unknown' || self == oldSelf + type: object + gcp: + description: GCP contains settings specific to the Google Cloud + Platform infrastructure provider. + type: object + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud + infrastructure provider. + type: object + kubevirt: + description: Kubevirt contains settings specific to the kubevirt + infrastructure provider. + type: object + nutanix: + description: Nutanix contains settings specific to the Nutanix + infrastructure provider. + properties: + failureDomains: + description: failureDomains configures failure domains information + for the Nutanix platform. When set, the failure domains + defined here may be used to spread Machines across prism + element clusters to improve fault tolerance of the cluster. + items: + description: NutanixFailureDomain configures failure domain + information for the Nutanix platform. + properties: + cluster: + description: cluster is to identify the cluster (the + Prism Element under management of the Prism Central), + in which the Machine's VM will be created. The cluster + identifier (uuid or name) can be obtained from the + Prism Central console or using the prism_central API. + properties: + name: + description: name is the resource name in the PC. + It cannot be empty if the type is Name. + type: string + type: + description: type is the identifier type to use + for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource in + the PC. It cannot be empty if the type is UUID. 
+ type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when type + is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) + : !has(self.uuid)' + - message: name configuration is required when type + is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) + : !has(self.name)' + name: + description: name defines the unique name of a failure + domain. Name is required and must be at most 64 characters + in length. It must consist of only lower case alphanumeric + characters and hyphens (-). It must start and end + with an alphanumeric character. This value is arbitrary + and is used to identify the failure domain within + the platform. + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + subnets: + description: subnets holds a list of identifiers (one + or more) of the cluster's network subnets for the + Machine's VM to connect to. The subnet identifiers + (uuid or name) can be obtained from the Prism Central + console or using the prism_central API. + items: + description: NutanixResourceIdentifier holds the identity + of a Nutanix PC resource (cluster, image, subnet, + etc.) + properties: + name: + description: name is the resource name in the + PC. It cannot be empty if the type is Name. + type: string + type: + description: type is the identifier type to use + for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource + in the PC. It cannot be empty if the type is + UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when type + is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' ? 
has(self.uuid) + : !has(self.uuid)' + - message: name configuration is required when type + is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) + : !has(self.name)' + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + required: + - cluster + - name + - subnets + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + prismCentral: + description: prismCentral holds the endpoint address and port + to access the Nutanix Prism Central. When a cluster-wide + proxy is installed, by default, this endpoint will be accessed + via the proxy. Should you wish for communication with this + endpoint not to be proxied, please add the endpoint to the + proxy spec.noProxy list. + properties: + address: + description: address is the endpoint address (DNS name + or IP address) of the Nutanix Prism Central or Element + (cluster) + maxLength: 256 + type: string + port: + description: port is the port number to access the Nutanix + Prism Central or Element (cluster) + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + prismElements: + description: prismElements holds one or more endpoint address + and port data to access the Nutanix Prism Elements (clusters) + of the Nutanix Prism Central. Currently we only support + one Prism Element (cluster) for an OpenShift cluster, where + all the Nutanix resources (VMs, subnets, volumes, etc.) + used in the OpenShift cluster are located. In the future, + we may support Nutanix resources (VMs, etc.) spread over + multiple Prism Elements (clusters) of the Prism Central. + items: + description: NutanixPrismElementEndpoint holds the name + and endpoint data for a Prism Element (cluster) + properties: + endpoint: + description: endpoint holds the endpoint address and + port data of the Prism Element (cluster). 
When a cluster-wide + proxy is installed, by default, this endpoint will + be accessed via the proxy. Should you wish for communication + with this endpoint not to be proxied, please add the + endpoint to the proxy spec.noProxy list. + properties: + address: + description: address is the endpoint address (DNS + name or IP address) of the Nutanix Prism Central + or Element (cluster) + maxLength: 256 + type: string + port: + description: port is the port number to access the + Nutanix Prism Central or Element (cluster) + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + name: + description: name is the name of the Prism Element (cluster). + This value will correspond with the cluster field + configured on other resources (eg Machines, PVCs, + etc). + maxLength: 256 + type: string + required: + - endpoint + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - prismCentral + - prismElements + type: object + openstack: + description: OpenStack contains settings specific to the OpenStack + infrastructure provider. + type: object + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure + provider. + type: object + powervs: + description: PowerVS contains settings specific to the IBM Power + Systems Virtual Servers infrastructure provider. + properties: + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints + which will override the default service endpoints of a Power + VS service. + items: + description: PowervsServiceEndpoint stores the configuration + of a custom url to override existing defaults of PowerVS + Services. + properties: + name: + description: name is the name of the Power VS service. 
+ Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api + ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller + Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with scheme + https, that overrides the default generated endpoint + for a client. This must be provided and cannot be + empty. + format: uri + pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: + description: type is the underlying infrastructure provider for + the cluster. This value controls whether infrastructure automation + such as service load balancers, dynamic volume provisioning, + machine creation and deletion, and other integrations are enabled. + If None, no infrastructure automation is enabled. Allowed values + are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", + "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", + "Nutanix" and "None". Individual components may not support + all platforms, and must handle unrecognized platforms as None + if they do not support that platform. + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + vsphere: + description: VSphere contains settings specific to the VSphere + infrastructure provider. + properties: + failureDomains: + description: failureDomains contains the definition of region, + zone and the vCenter topology. If this is omitted failure + domains (regions and zones) will not be used. + items: + description: VSpherePlatformFailureDomainSpec holds the + region and zone failure domain and the vCenter topology + of that failure domain. 
+ properties: + name: + description: name defines the arbitrary but unique name + of a failure domain. + maxLength: 256 + minLength: 1 + type: string + region: + description: region defines the name of a region tag + that will be attached to a vCenter datacenter. The + tag category in vCenter must be named openshift-region. + maxLength: 80 + minLength: 1 + type: string + server: + anyOf: + - format: ipv4 + - format: ipv6 + - format: hostname + description: server is the fully-qualified domain name + or the IP address of the vCenter server. --- + maxLength: 255 + minLength: 1 + type: string + topology: + description: Topology describes a given failure domain + using vSphere constructs + properties: + computeCluster: + description: computeCluster the absolute path of + the vCenter cluster in which virtual machine will + be located. The absolute path is of the form //host/. + The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/host/.*? + type: string + datacenter: + description: datacenter is the name of vCenter datacenter + in which virtual machines will be located. The + maximum length of the datacenter name is 80 characters. + maxLength: 80 + type: string + datastore: + description: datastore is the absolute path of the + datastore in which the virtual machine is located. + The absolute path is of the form //datastore/ + The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/datastore/.*? + type: string + folder: + description: folder is the absolute path of the + folder where virtual machines are located. The + absolute path is of the form //vm/. + The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/vm/.*? + type: string + networks: + description: networks is the list of port group + network names within this failure domain. Currently, + we only support a single interface per RHCOS virtual + machine. 
The available networks (port groups) + can be listed using `govc ls 'network/*'` The + single interface should be the absolute path of + the form //network/. + items: + type: string + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-type: atomic + resourcePool: + description: resourcePool is the absolute path of + the resource pool where virtual machines will + be created. The absolute path is of the form //host//Resources/. + The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/host/.*?/Resources.* + type: string + template: + description: "template is the full inventory path + of the virtual machine or template that will be + cloned when creating new machines in this failure + domain. The maximum length of the path is 2048 + characters. \n When omitted, the template will + be calculated by the control plane machineset + operator based on the region and zone defined + in VSpherePlatformFailureDomainSpec. For example, + for zone=zonea, region=region1, and infrastructure + name=test, the template path would be calculated + as //vm/test-rhcos-region1-zonea." + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zone: + description: zone defines the name of a zone tag that + will be attached to a vCenter cluster. The tag category + in vCenter must be named openshift-zone. + maxLength: 80 + minLength: 1 + type: string + required: + - name + - region + - server + - topology + - zone + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeNetworking: + description: nodeNetworking contains the definition of internal + and external network constraints for assigning the node's + networking. If this field is omitted, networking defaults + to the legacy address selection behavior which is to only + support a single address and return the first one found. 
+ properties: + external: + description: external represents the network configuration + of the node that is externally routable. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses + in subnet ranges will be excluded when selecting + the IP address from the VirtualMachine's VM for + use in the status.addresses fields. --- + items: + format: cidr + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: network VirtualMachine's VM Network names + that will be used to when searching for status.addresses + fields. Note that if internal.networkSubnetCIDR + and external.networkSubnetCIDR are not set, then + the vNIC associated to this network must only have + a single IP address assigned to it. The available + networks (port groups) can be listed using `govc + ls 'network/*'` + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address on VirtualMachine's + network interfaces included in the fields' CIDRs + that will be used in respective status.addresses + fields. --- + items: + format: cidr + type: string + type: array + x-kubernetes-list-type: set + type: object + internal: + description: internal represents the network configuration + of the node that is routable only within the cluster. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses + in subnet ranges will be excluded when selecting + the IP address from the VirtualMachine's VM for + use in the status.addresses fields. --- + items: + format: cidr + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: network VirtualMachine's VM Network names + that will be used to when searching for status.addresses + fields. Note that if internal.networkSubnetCIDR + and external.networkSubnetCIDR are not set, then + the vNIC associated to this network must only have + a single IP address assigned to it. 
The available + networks (port groups) can be listed using `govc + ls 'network/*'` + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address on VirtualMachine's + network interfaces included in the fields' CIDRs + that will be used in respective status.addresses + fields. --- + items: + format: cidr + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + vcenters: + description: vcenters holds the connection details for services + to communicate with vCenter. Currently, only a single vCenter + is supported. --- + items: + description: VSpherePlatformVCenterSpec stores the vCenter + connection fields. This is used by the vSphere CCM. + properties: + datacenters: + description: The vCenter Datacenters in which the RHCOS + vm guests are located. This field will be used by + the Cloud Controller Manager. Each datacenter listed + here should be used within a topology. + items: + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + port: + description: port is the TCP port that will be used + to communicate to the vCenter endpoint. When omitted, + this means the user has no opinion and it is up to + the platform to choose a sensible default, which is + subject to change over time. + format: int32 + maximum: 32767 + minimum: 1 + type: integer + server: + anyOf: + - format: ipv4 + - format: ipv6 + - format: hostname + description: server is the fully-qualified domain name + or the IP address of the vCenter server. --- + maxLength: 255 + type: string + required: + - datacenters + - server + type: object + maxItems: 1 + minItems: 0 + type: array + x-kubernetes-list-type: atomic + type: object + type: object + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + apiServerInternalURI: + description: apiServerInternalURL is a valid URI with scheme 'https', + address and optionally a port (defaulting to 443). 
apiServerInternalURL + can be used by components like kubelets, to contact the Kubernetes + API server using the infrastructure provider rather than Kubernetes + networking. + type: string + apiServerURL: + description: apiServerURL is a valid URI with scheme 'https', address + and optionally a port (defaulting to 443). apiServerURL can be + used by components like the web console to tell users where to find + the Kubernetes API. + type: string + controlPlaneTopology: + default: HighlyAvailable + description: controlPlaneTopology expresses the expectations for operands + that normally run on control nodes. The default is 'HighlyAvailable', + which represents the behavior operators have in a "normal" cluster. + The 'SingleReplica' mode will be used in single-node deployments + and the operators should not configure the operand for highly-available + operation The 'External' mode indicates that the control plane is + hosted externally to the cluster and that its components are not + visible within the cluster. + enum: + - HighlyAvailable + - SingleReplica + - External + type: string + cpuPartitioning: + default: None + description: cpuPartitioning expresses if CPU partitioning is a currently + enabled feature in the cluster. CPU Partitioning means that this + cluster can support partitioning workloads to specific CPU Sets. + Valid values are "None" and "AllNodes". When omitted, the default + value is "None". The default value of "None" indicates that no nodes + will be setup with CPU partitioning. The "AllNodes" value indicates + that all nodes have been setup with CPU partitioning, and can then + be further configured via the PerformanceProfile API. + enum: + - None + - AllNodes + type: string + etcdDiscoveryDomain: + description: 'etcdDiscoveryDomain is the domain used to fetch the + SRV records for discovering etcd servers and clients. 
For more info: + https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery + deprecated: as of 4.7, this field is no longer set or honored. It + will be removed in a future release.' + type: string + infrastructureName: + description: infrastructureName uniquely identifies a cluster with + a human friendly name. Once set it should not be changed. Must be + of max length 27 and must have only alphanumeric or hyphen characters. + type: string + infrastructureTopology: + default: HighlyAvailable + description: 'infrastructureTopology expresses the expectations for + infrastructure services that do not run on control plane nodes, + usually indicated by a node selector for a `role` value other than + `master`. The default is ''HighlyAvailable'', which represents the + behavior operators have in a "normal" cluster. The ''SingleReplica'' + mode will be used in single-node deployments and the operators should + not configure the operand for highly-available operation NOTE: External + topology mode is not applicable for this field.' + enum: + - HighlyAvailable + - SingleReplica + type: string + platform: + description: "platform is the underlying infrastructure provider for + the cluster. \n Deprecated: Use platformStatus.type instead." + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + platformStatus: + description: platformStatus holds status information specific to the + underlying infrastructure provider. + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba + Cloud infrastructure provider. + properties: + region: + description: region specifies the region for Alibaba Cloud + resources created for the cluster. 
+ pattern: ^[0-9A-Za-z-]+$ + type: string + resourceGroupID: + description: resourceGroupID is the ID of the resource group + for the cluster. + pattern: ^(rg-[0-9A-Za-z]+)?$ + type: string + resourceTags: + description: resourceTags is a list of additional tags to + apply to Alibaba Cloud resources created for the cluster. + items: + description: AlibabaCloudResourceTag is the set of tags + to add to apply to resources. + properties: + key: + description: key is the key of the tag. + maxLength: 128 + minLength: 1 + type: string + value: + description: value is the value of the tag. + maxLength: 128 + minLength: 1 + type: string + required: + - key + - value + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + required: + - region + type: object + aws: + description: AWS contains settings specific to the Amazon Web + Services infrastructure provider. + properties: + region: + description: region holds the default AWS region for new AWS + resources created by the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional tags to + apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html + for information on tagging AWS resources. AWS supports a + maximum of 50 tags per resource. OpenShift reserves 25 tags + for its use, leaving 25 tags available for the user. + items: + description: AWSResourceTag is a tag to apply to AWS resources + created for the cluster. + properties: + key: + description: key is the key of the tag + maxLength: 128 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + value: + description: value is the value of the tag. Some AWS + service do not support empty values. Since tags are + added to resources in many services, the length of + the tag value must meet the requirements of all services. 
+ maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 25 + type: array + x-kubernetes-list-type: atomic + serviceEndpoints: + description: ServiceEndpoints list contains custom endpoints + which will override default service endpoint of AWS Services. + There must be only one ServiceEndpoint for a service. + items: + description: AWSServiceEndpoint store the configuration + of a custom url to override existing defaults of AWS Services. + properties: + name: + description: name is the name of the AWS service. The + list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html + This must be provided and cannot be empty. + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with scheme + https, that overrides the default generated endpoint + for a client. This must be provided and cannot be + empty. + pattern: ^https:// + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + azure: + description: Azure contains settings specific to the Azure infrastructure + provider. + properties: + armEndpoint: + description: armEndpoint specifies a URL to use for resource + management in non-soverign clouds such as Azure Stack. + type: string + cloudName: + description: cloudName is the name of the Azure cloud environment + which can be used to configure the Azure SDK with the appropriate + Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. + enum: + - "" + - AzurePublicCloud + - AzureUSGovernmentCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureStackCloud + type: string + networkResourceGroupName: + description: networkResourceGroupName is the Resource Group + for network resources like the Virtual Network and Subnets + used by the cluster. If empty, the value is same as ResourceGroupName. 
+ type: string + resourceGroupName: + description: resourceGroupName is the Resource Group for new + Azure resources created for the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional tags to + apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags + for information on tagging Azure resources. Due to limitations + on Automation, Content Delivery Network, DNS Azure resources, + a maximum of 15 tags may be applied. OpenShift reserves + 5 tags for internal use, allowing 10 tags for user configuration. + items: + description: AzureResourceTag is a tag to apply to Azure + resources created for the cluster. + properties: + key: + description: key is the key part of the tag. A tag key + can have a maximum of 128 characters and cannot be + empty. Key must begin with a letter, end with a letter, + number or underscore, and must contain only alphanumeric + characters and the following special characters `_ + . -`. + maxLength: 128 + minLength: 1 + pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ + type: string + value: + description: 'value is the value part of the tag. A + tag value can have a maximum of 256 characters and + cannot be empty. Value must contain only alphanumeric + characters and the following special characters `_ + + , - . / : ; < = > ? @`.' 
+ maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: resourceTags are immutable and may only be configured + during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) + type: object + x-kubernetes-validations: + - message: resourceTags may only be configured during installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) + || has(oldSelf.resourceTags) && has(self.resourceTags)' + baremetal: + description: BareMetal contains settings specific to the BareMetal + platform. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. + In dual stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes to + the default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. \n Deprecated: Use IngressIPs instead." 
+ type: string + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IPs + otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used + by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used + by the cluster on BareMetal platform which can be a + user-managed or openshift-managed load balancer that + is to be used for the OpenShift API and Ingress endpoints. + When set to OpenShiftManagedDefault the static pods + in charge of API and Ingress traffic load-balancing + defined in the machine config operator will be deployed. + When set to UserManaged these static pods will not be + deployed and it is expected that the load balancer is + configured out of band by the deployer. When omitted, + this means no opinion and the platform is left to choose + a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal + DNS used by the nodes. Unlike the one managed by the DNS + operator, `NodeDNSIP` provides name resolution for the nodes + themselves. There is no DNS-as-a-service for BareMetal deployments. + In order to minimize necessary changes to the datacenter + DNS, a DNS service is hosted as a static pod to serve those + hostnames to the nodes in the cluster. 
+ type: string + type: object + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix + Metal infrastructure provider. + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the + default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. + type: string + type: object + external: + description: External contains settings specific to the generic + External infrastructure provider. + properties: + cloudControllerManager: + description: cloudControllerManager contains settings specific + to the external Cloud Controller Manager (a.k.a. CCM or + CPI). When omitted, new nodes will be not tainted and no + extra initialization from the cloud controller manager is + expected. + properties: + state: + description: "state determines whether or not an external + Cloud Controller Manager is expected to be installed + within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager + \n Valid values are \"External\", \"None\" and omitted. + When set to \"External\", new nodes will be tainted + as uninitialized when created, preventing them from + running workloads until they are initialized by the + cloud controller manager. When omitted or set to \"None\", + new nodes will be not tainted and no extra initialization + from the cloud controller manager is expected." 
+ enum: + - "" + - External + - None + type: string + x-kubernetes-validations: + - message: state is immutable once set + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: state may not be added or removed once set + rule: (has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) + && self.state != "External") + type: object + x-kubernetes-validations: + - message: cloudControllerManager may not be added or removed + once set + rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) + gcp: + description: GCP contains settings specific to the Google Cloud + Platform infrastructure provider. + properties: + cloudLoadBalancerConfig: + default: + dnsType: PlatformDefault + description: cloudLoadBalancerConfig is a union that contains + the IP addresses of API, API-Int and Ingress Load Balancers + created on the cloud platform. These values would not be + populated on on-prem platforms. These Load Balancer IPs + are used to configure the in-cluster DNS instances for API, + API-Int and Ingress services. `dnsType` is expected to be + set to `ClusterHosted` when these Load Balancer IP addresses + are populated and used. + nullable: true + properties: + clusterHosted: + description: clusterHosted holds the IP addresses of API, + API-Int and Ingress Load Balancers on Cloud Platforms. + The DNS solution hosted within the cluster use these + IP addresses to provide resolution for API, API-Int + and Ingress services. + properties: + apiIntLoadBalancerIPs: + description: apiIntLoadBalancerIPs holds Load Balancer + IPs for the internal API service. These Load Balancer + IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the apiIntLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$) + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + apiLoadBalancerIPs: + description: apiLoadBalancerIPs holds Load Balancer + IPs for the API service. These Load Balancer IP + addresses can be IPv4 and/or IPv6 addresses. Could + be empty for private clusters. Entries in the apiLoadBalancerIPs + must be unique. A maximum of 16 IP addresses are + permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$) + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + ingressLoadBalancerIPs: + description: ingressLoadBalancerIPs holds IPs for + Ingress Load Balancers. These Load Balancer IP addresses + can be IPv4 and/or IPv6 addresses. Entries in the + ingressLoadBalancerIPs must be unique. A maximum + of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$) + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + type: object + dnsType: + default: PlatformDefault + description: dnsType indicates the type of DNS solution + in use within the cluster. Its default value of `PlatformDefault` + indicates that the cluster's DNS is the default provided + by the cloud platform. It can be set to `ClusterHosted` + to bypass the configuration of the cloud default DNS. + In this mode, the cluster needs to provide a self-hosted + DNS solution for the cluster's installation to succeed. + The cluster's use of the cloud's Load Balancers is unaffected + by this setting. The value is immutable after it has + been set at install time. Currently, there is no way + for the customer to add additional DNS entries into + the cluster hosted DNS. 
Enabling this functionality + allows the user to start their own DNS solution outside + the cluster after installation is complete. The customer + would be responsible for configuring this custom DNS + solution, and it can be run in addition to the in-cluster + DNS solution. + enum: + - ClusterHosted + - PlatformDefault + type: string + x-kubernetes-validations: + - message: dnsType is immutable + rule: oldSelf == '' || self == oldSelf + type: object + x-kubernetes-validations: + - message: clusterHosted is permitted only when dnsType is + ClusterHosted + rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' + ? !has(self.clusterHosted) : true' + projectID: + description: resourceGroupName is the Project ID for new GCP + resources created for the cluster. + type: string + region: + description: region holds the region for new GCP resources + created for the cluster. + type: string + resourceLabels: + description: resourceLabels is a list of additional labels + to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources + for information on labeling GCP resources. GCP supports + a maximum of 64 labels per resource. OpenShift reserves + 32 labels for internal use, allowing 32 labels for user + configuration. + items: + description: GCPResourceLabel is a label to apply to GCP + resources created for the cluster. + properties: + key: + description: key is the key part of the label. A label + key can have a maximum of 63 characters and cannot + be empty. Label key must begin with a lowercase letter, + and must contain only lowercase letters, numeric characters, + and the following special characters `_-`. Label key + must not have the reserved prefixes `kubernetes-io` + and `openshift-io`. 
+ maxLength: 63 + minLength: 1 + pattern: ^[a-z][0-9a-z_-]{0,62}$ + type: string + x-kubernetes-validations: + - message: label keys must not start with either `openshift-io` + or `kubernetes-io` + rule: '!self.startsWith(''openshift-io'') && !self.startsWith(''kubernetes-io'')' + value: + description: value is the value part of the label. A + label value can have a maximum of 63 characters and + cannot be empty. Value must contain only lowercase + letters, numeric characters, and the following special + characters `_-`. + maxLength: 63 + minLength: 1 + pattern: ^[0-9a-z_-]{1,63}$ + type: string + required: + - key + - value + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: resourceLabels are immutable and may only be configured + during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) + resourceTags: + description: resourceTags is a list of additional tags to + apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview + for information on tagging GCP resources. GCP supports a + maximum of 50 tags per resource. + items: + description: GCPResourceTag is a tag to apply to GCP resources + created for the cluster. + properties: + key: + description: key is the key part of the tag. A tag key + can have a maximum of 63 characters and cannot be + empty. Tag key must begin and end with an alphanumeric + character, and must contain only uppercase, lowercase + alphanumeric characters, and the following special + characters `._-`. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$ + type: string + parentID: + description: 'parentID is the ID of the hierarchical + resource where the tags are defined, e.g. at the Organization + or the Project level. 
To find the Organization or + Project ID refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, + https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. + An OrganizationID must consist of decimal numbers, + and cannot have leading zeroes. A ProjectID must be + 6 to 30 characters in length, can only contain lowercase + letters, numbers, and hyphens, and must start with + a letter, and cannot end with a hyphen.' + maxLength: 32 + minLength: 1 + pattern: (^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$) + type: string + value: + description: value is the value part of the tag. A tag + value can have a maximum of 63 characters and cannot + be empty. Tag value must begin and end with an alphanumeric + character, and must contain only uppercase, lowercase + alphanumeric characters, and the following special + characters `_-.@%=+:,*#&(){}[]` and spaces. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$ + type: string + required: + - key + - parentID + - value + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: resourceTags are immutable and may only be configured + during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) + type: object + x-kubernetes-validations: + - message: resourceLabels may only be configured during installation + rule: '!has(oldSelf.resourceLabels) && !has(self.resourceLabels) + || has(oldSelf.resourceLabels) && has(self.resourceLabels)' + - message: resourceTags may only be configured during installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) + || has(oldSelf.resourceTags) && has(self.resourceTags)' + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud + infrastructure provider. 
+ properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet + Services instance managing the DNS zone for the cluster's + base domain + type: string + dnsInstanceCRN: + description: DNSInstanceCRN is the CRN of the DNS Services + instance managing the DNS zone for the cluster's base domain + type: string + location: + description: Location is where the cluster has been deployed + type: string + providerType: + description: ProviderType indicates the type of cluster that + was created + type: string + resourceGroupName: + description: ResourceGroupName is the Resource Group for new + IBMCloud resources created for the cluster. + type: string + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints + which will override the default service endpoints of an + IBM Cloud service. These endpoints are consumed by components + within the cluster to reach the respective IBM Cloud Services. + items: + description: IBMCloudServiceEndpoint stores the configuration + of a custom url to override existing defaults of IBM Cloud + Services. + properties: + name: + description: 'name is the name of the IBM Cloud service. + Possible values are: CIS, COS, DNSServices, GlobalSearch, + GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, + ResourceManager, or VPC. For example, the IBM Cloud + Private IAM service could be configured with the service + `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` + Whereas the IBM Cloud Private VPC service for US South + (Dallas) could be configured with the service `name` + of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`' + enum: + - CIS + - COS + - DNSServices + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - ResourceController + - ResourceManager + - VPC + type: string + url: + description: url is fully qualified URI with scheme + https, that overrides the default generated endpoint + for a client. 
This must be provided and cannot be + empty. + type: string + x-kubernetes-validations: + - message: url must be a valid absolute URL + rule: isURL(self) + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + kubevirt: + description: Kubevirt contains settings specific to the kubevirt + infrastructure provider. + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the + default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. + type: string + type: object + nutanix: + description: Nutanix contains settings specific to the Nutanix + infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. 
+ In dual stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes to + the default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IPs + otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used + by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used + by the cluster on Nutanix platform which can be a user-managed + or openshift-managed load balancer that is to be used + for the OpenShift API and Ingress endpoints. When set + to OpenShiftManagedDefault the static pods in charge + of API and Ingress traffic load-balancing defined in + the machine config operator will be deployed. When set + to UserManaged these static pods will not be deployed + and it is expected that the load balancer is configured + out of band by the deployer. When omitted, this means + no opinion and the platform is left to choose a reasonable + default. The default value is OpenShiftManagedDefault. 
+ enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + type: object + openstack: + description: OpenStack contains settings specific to the OpenStack + infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. + In dual stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + cloudName: + description: cloudName is the name of the desired OpenStack + cloud in the client configuration file (`clouds.yaml`). + type: string + ingressIP: + description: "ingressIP is an external IP which routes to + the default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IPs + otherwise only one. 
+ format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used + by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used + by the cluster on OpenStack platform which can be a + user-managed or openshift-managed load balancer that + is to be used for the OpenShift API and Ingress endpoints. + When set to OpenShiftManagedDefault the static pods + in charge of API and Ingress traffic load-balancing + defined in the machine config operator will be deployed. + When set to UserManaged these static pods will not be + deployed and it is expected that the load balancer is + configured out of band by the deployer. When omitted, + this means no opinion and the platform is left to choose + a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal + DNS used by the nodes. Unlike the one managed by the DNS + operator, `NodeDNSIP` provides name resolution for the nodes + themselves. There is no DNS-as-a-service for OpenStack deployments. + In order to minimize necessary changes to the datacenter + DNS, a DNS service is hosted as a static pod to serve those + hostnames to the nodes in the cluster. + type: string + type: object + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure + provider. 
+ properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. + In dual stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes to + the default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IPs + otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used + by the cluster is configured. 
+ properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used + by the cluster on Ovirt platform which can be a user-managed + or openshift-managed load balancer that is to be used + for the OpenShift API and Ingress endpoints. When set + to OpenShiftManagedDefault the static pods in charge + of API and Ingress traffic load-balancing defined in + the machine config operator will be deployed. When set + to UserManaged these static pods will not be deployed + and it is expected that the load balancer is configured + out of band by the deployer. When omitted, this means + no opinion and the platform is left to choose a reasonable + default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + nodeDNSIP: + description: 'deprecated: as of 4.6, this field is no longer + set or honored. It will be removed in a future release.' + type: string + type: object + powervs: + description: PowerVS contains settings specific to the Power Systems + Virtual Servers infrastructure provider. + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet + Services instance managing the DNS zone for the cluster's + base domain + type: string + dnsInstanceCRN: + description: DNSInstanceCRN is the CRN of the DNS Services + instance managing the DNS zone for the cluster's base domain + type: string + region: + description: region holds the default Power VS region for + new Power VS resources created by the cluster. + type: string + resourceGroup: + description: 'resourceGroup is the resource group name for + new IBMCloud resources created for a cluster. The resource + group specified here will be used by cluster-image-registry-operator + to set up a COS Instance in IBMCloud for the cluster registry. 
+ More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. + When omitted, the image registry operator won''t be able + to configure storage, which results in the image registry + cluster operator not being in an available state.' + maxLength: 40 + pattern: ^[a-zA-Z0-9-_ ]+$ + type: string + x-kubernetes-validations: + - message: resourceGroup is immutable once set + rule: oldSelf == '' || self == oldSelf + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints + which will override the default service endpoints of a Power + VS service. + items: + description: PowervsServiceEndpoint stores the configuration + of a custom url to override existing defaults of PowerVS + Services. + properties: + name: + description: name is the name of the Power VS service. + Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api + ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller + Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with scheme + https, that overrides the default generated endpoint + for a client. This must be provided and cannot be + empty. + format: uri + pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + zone: + description: 'zone holds the default zone for the new Power + VS resources created by the cluster. Note: Currently only + single-zone OCP clusters are supported' + type: string + type: object + x-kubernetes-validations: + - message: cannot unset resourceGroup once set + rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' + type: + description: "type is the underlying infrastructure provider for + the cluster. 
This value controls whether infrastructure automation + such as service load balancers, dynamic volume provisioning, + machine creation and deletion, and other integrations are enabled. + If None, no infrastructure automation is enabled. Allowed values + are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", + \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", + \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components + may not support all platforms, and must handle unrecognized + platforms as None if they do not support that platform. \n This + value will be synced with to the `status.platform` and `status.platformStatus.type`. + Currently this value cannot be changed once set." + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + vsphere: + description: VSphere contains settings specific to the VSphere + infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. + In dual stack clusters this list contains two IPs otherwise + only one. 
+ format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes to + the default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IPs + otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used + by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used + by the cluster on VSphere platform which can be a user-managed + or openshift-managed load balancer that is to be used + for the OpenShift API and Ingress endpoints. When set + to OpenShiftManagedDefault the static pods in charge + of API and Ingress traffic load-balancing defined in + the machine config operator will be deployed. When set + to UserManaged these static pods will not be deployed + and it is expected that the load balancer is configured + out of band by the deployer. When omitted, this means + no opinion and the platform is left to choose a reasonable + default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal + DNS used by the nodes. 
Unlike the one managed by the DNS + operator, `NodeDNSIP` provides name resolution for the nodes + themselves. There is no DNS-as-a-service for vSphere deployments. + In order to minimize necessary changes to the datacenter + DNS, a DNS service is hosted as a static pod to serve those + hostnames to the nodes in the cluster. + type: string + type: object + type: object + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {}