diff --git a/deployments/nimbus-k8tls/templates/configmap.yaml b/deployments/nimbus-k8tls/templates/configmap.yaml index bf14f255..4b72738d 100644 --- a/deployments/nimbus-k8tls/templates/configmap.yaml +++ b/deployments/nimbus-k8tls/templates/configmap.yaml @@ -3,37 +3,140 @@ kind: ConfigMap metadata: name: fluent-bit-config namespace: {{ include "nimbus-k8tls.fullname" . }}-env + labels: + {{- include "nimbus-k8tls.labels" . | nindent 4 }} data: fluent-bit.conf: | - [SERVICE] - Flush 1 - Log_Level info - Parsers_File parsers.conf + [SERVICE] + Flush 1 + Log_Level info + Parsers_File parsers.conf + + [INPUT] + Name tail + Path /tmp/compact_report.json + Parser json + Tag json.data + DB /tmp/compact_report.db + Read_from_Head true + Exit_On_Eof true - [INPUT] - Name tail - Path /tmp/compact_report.json - Parser json - Tag json.data - DB /tmp/compact_report.db - Read_from_Head true - Exit_On_Eof true - {{- if .Values.output.elasticsearch.enabled }} - [OUTPUT] - Name es - Match * - Host {{ .Values.output.elasticsearch.host }} - Port {{ .Values.output.elasticsearch.port }} - Index {{ .Values.output.elasticsearch.index }} - HTTP_User {{ .Values.output.elasticsearch.user }} - HTTP_Passwd ${ES_PASSWORD} - tls On - tls.verify Off - Suppress_Type_Name On - Replace_Dots On + {{- if .Values.output.elasticsearch.enabled }} + [OUTPUT] + Name es + Match * + Host {{ .Values.output.elasticsearch.host }} + Port {{ .Values.output.elasticsearch.port }} + Index {{ .Values.output.elasticsearch.index }} + HTTP_User {{ .Values.output.elasticsearch.user }} + HTTP_Passwd ${ES_PASSWORD} + tls On + tls.verify Off + Suppress_Type_Name On + Replace_Dots On {{- end }} - [OUTPUT] - Name stdout - Match * \ No newline at end of file + [OUTPUT] + Name stdout + Match * +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: fips-config + namespace: {{ include "nimbus-k8tls.fullname" . }}-env + labels: + {{- include "nimbus-k8tls.labels" . 
| nindent 4 }} +data: + fips-140-3.json: |2- + { + "TLS_versions": [ + { + "TLS_version": "TLSv1.0_1.1", + "cipher_suites": [ + { + "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA" + }, + { + "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA" + }, + { + "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA" + }, + { + "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA" + } + ] + }, + { + "TLS_version": "TLSv1.2", + "cipher_suites": [ + { + "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA" + }, + { + "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA" + }, + { + "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA" + }, + { + "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA" + }, + { + "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" + }, + { + "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + }, + { + "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + }, + { + "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" + }, + { + "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_256_CCM" + }, + { + "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_128_CCM" + }, + { + "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8" + }, + { + "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8" + }, + { + "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384" + }, + { + "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" + }, + { + "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384" + }, + { + "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" + } + ] + }, + { + "TLS_version": "TLSv1.3", + "cipher_suites": [ + { + "cipher_suite": "TLS_AES_256_GCM_SHA384" + }, + { + "cipher_suite": "TLS_AES_128_GCM_SHA256" + }, + { + "cipher_suite": "TLS_AES_128_CCM_SHA256" + }, + { + "cipher_suite": "TLS_AES_128_CCM_8_SHA256" + } + ] + } + ] + } diff --git a/deployments/nimbus-k8tls/templates/deployment.yaml b/deployments/nimbus-k8tls/templates/deployment.yaml index a1ef8957..f084f305 100644 --- a/deployments/nimbus-k8tls/templates/deployment.yaml +++ 
b/deployments/nimbus-k8tls/templates/deployment.yaml @@ -21,8 +21,8 @@ spec: {{- toYaml .Values.securityContext | nindent 12 }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} - env: {{- if .Values.output.elasticsearch.enabled }} + env: - name: TTLSECONDSAFTERFINISHED value: "{{ .Values.output.elasticsearch.ttlsecondsafterfinished }}" {{- end }} diff --git a/deployments/nimbus-k8tls/templates/k8tls-role.yaml b/deployments/nimbus-k8tls/templates/k8tls-role.yaml new file mode 100644 index 00000000..fd8edf17 --- /dev/null +++ b/deployments/nimbus-k8tls/templates/k8tls-role.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: k8tls + labels: + {{- include "nimbus-k8tls.labels" . | nindent 4 }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list diff --git a/deployments/nimbus-k8tls/templates/namespace.yaml b/deployments/nimbus-k8tls/templates/namespace.yaml index 840fa44f..2caea99f 100644 --- a/deployments/nimbus-k8tls/templates/namespace.yaml +++ b/deployments/nimbus-k8tls/templates/namespace.yaml @@ -1,4 +1,6 @@ apiVersion: v1 kind: Namespace metadata: - name: {{ include "nimbus-k8tls.fullname" . }}-env \ No newline at end of file + name: {{ include "nimbus-k8tls.fullname" . }}-env + labels: + {{- include "nimbus-k8tls.labels" . | nindent 4 }} diff --git a/deployments/nimbus-k8tls/templates/role.yaml b/deployments/nimbus-k8tls/templates/role.yaml index 95e2eb7b..873edb4b 100644 --- a/deployments/nimbus-k8tls/templates/role.yaml +++ b/deployments/nimbus-k8tls/templates/role.yaml @@ -2,66 +2,56 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: nimbus-k8tls-clusterrole + name: nimbus-k8tls + labels: + {{- include "nimbus-k8tls.labels" . 
| nindent 4 }} rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - serviceaccounts - verbs: - - create - - delete - - get - - update -- apiGroups: - - "" - resources: - - services - verbs: - - get - - list -- apiGroups: - - batch - resources: - - cronjobs - verbs: - - create - - delete - - get - - list - - update - - watch -- apiGroups: - - intent.security.nimbus.com - resources: - - clusternimbuspolicies - verbs: - - get - - list - - watch -- apiGroups: - - intent.security.nimbus.com - resources: - - clusternimbuspolicies/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterrolebindings - - clusterroles - verbs: - - create - - delete - - get - - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - update + - apiGroups: + - "" + resources: + - namespaces + - serviceaccounts + verbs: + - get + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - delete + - get + - list + - update + - watch + - apiGroups: + - intent.security.nimbus.com + resources: + - clusternimbuspolicies + verbs: + - get + - list + - watch + - apiGroups: + - intent.security.nimbus.com + resources: + - clusternimbuspolicies/status + verbs: + - get + - patch + - update {{- if .Values.output.elasticsearch.enabled }} -- apiGroups: [""] - resources: ["secrets"] - resourceNames: ["elasticsearch-password"] - verbs: ["get"] + - apiGroups: [ "" ] + resources: [ "secrets" ] + resourceNames: [ "elasticsearch-password" ] + verbs: [ "get" ] {{- end }} diff --git a/deployments/nimbus-k8tls/templates/rolebinding.yaml b/deployments/nimbus-k8tls/templates/rolebinding.yaml index 5b21eac6..f2d322f5 100644 --- a/deployments/nimbus-k8tls/templates/rolebinding.yaml +++ b/deployments/nimbus-k8tls/templates/rolebinding.yaml @@ -1,12 +1,29 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: {{ include "nimbus-k8tls.fullname" . 
}}-clusterrole-binding + name: {{ include "nimbus-k8tls.fullname" . }} + labels: + {{- include "nimbus-k8tls.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: nimbus-k8tls-clusterrole + name: nimbus-k8tls subjects: - kind: ServiceAccount name: {{ include "nimbus-k8tls.serviceAccountName" . }} namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: k8tls + labels: + {{- include "nimbus-k8tls.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: k8tls +subjects: + - kind: ServiceAccount + name: k8tls + namespace: {{ include "nimbus-k8tls.fullname" . }}-env diff --git a/deployments/nimbus-k8tls/templates/secret.yaml b/deployments/nimbus-k8tls/templates/secret.yaml index ed893cb8..b73d0a0a 100644 --- a/deployments/nimbus-k8tls/templates/secret.yaml +++ b/deployments/nimbus-k8tls/templates/secret.yaml @@ -4,6 +4,8 @@ kind: Secret metadata: name: elasticsearch-password namespace: {{ include "nimbus-k8tls.fullname" . }}-env + labels: + {{- include "nimbus-k8tls.labels" . | nindent 4 }} type: Opaque data: es_password: {{ .Values.output.elasticsearch.password }} diff --git a/deployments/nimbus-k8tls/templates/serviceaccount.yaml b/deployments/nimbus-k8tls/templates/serviceaccount.yaml index 471ec9a6..0219d415 100644 --- a/deployments/nimbus-k8tls/templates/serviceaccount.yaml +++ b/deployments/nimbus-k8tls/templates/serviceaccount.yaml @@ -8,3 +8,11 @@ metadata: {{- include "nimbus-k8tls.labels" . | nindent 4 }} automountServiceAccountToken: {{ .Values.serviceAccount.automount }} {{- end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: k8tls + namespace: {{ include "nimbus-k8tls.fullname" . }}-env + labels: + {{- include "nimbus-k8tls.labels" . 
| nindent 4 }} diff --git a/pkg/adapter/common/common.go b/pkg/adapter/common/common.go index c50fde7b..ce62a2db 100644 --- a/pkg/adapter/common/common.go +++ b/pkg/adapter/common/common.go @@ -12,5 +12,5 @@ type ContextKey string const ( K8sClientKey ContextKey = "k8sClient" - NamespaceNameKey ContextKey = "NamespaceName" + NamespaceNameKey ContextKey = "K8tlsNamespace" ) diff --git a/pkg/adapter/nimbus-k8tls/manager/cronjob.go b/pkg/adapter/nimbus-k8tls/manager/cronjob.go index 4b8ccc13..763f2675 100644 --- a/pkg/adapter/nimbus-k8tls/manager/cronjob.go +++ b/pkg/adapter/nimbus-k8tls/manager/cronjob.go @@ -22,8 +22,8 @@ import ( ) func createOrUpdateCj(ctx context.Context, logger logr.Logger, cwnp v1alpha1.ClusterNimbusPolicy, cronJob *batchv1.CronJob) { - cronJob.Namespace = NamespaceName - cronJob.Spec.JobTemplate.Spec.Template.Spec.ServiceAccountName = NamespaceName + cronJob.Namespace = K8tlsNamespace + cronJob.Spec.JobTemplate.Spec.Template.Spec.ServiceAccountName = k8tls if err := ctrl.SetControllerReference(&cwnp, cronJob, scheme); err != nil { logger.Error(err, "failed to set OwnerReference on Kubernetes CronJob", "CronJob.Name", cronJob.Name) return @@ -76,7 +76,7 @@ func deleteCronJobs(ctx context.Context, logger logr.Logger, cwnpName string, cr func createCm(ctx context.Context, cwnp v1alpha1.ClusterNimbusPolicy, scheme *runtime.Scheme, k8sClient client.Client, configMap *corev1.ConfigMap) error { logger := log.FromContext(ctx) - configMap.SetNamespace(NamespaceName) + configMap.SetNamespace(K8tlsNamespace) if err := ctrl.SetControllerReference(&cwnp, configMap, scheme); err != nil { return err } diff --git a/pkg/adapter/nimbus-k8tls/manager/k8tls.go b/pkg/adapter/nimbus-k8tls/manager/k8tls.go index 95264e16..8f826d6f 100644 --- a/pkg/adapter/nimbus-k8tls/manager/k8tls.go +++ b/pkg/adapter/nimbus-k8tls/manager/k8tls.go @@ -5,248 +5,28 @@ package manager import ( "context" - "fmt" - "strings" corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - 
"k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - - "github.com/5GSEC/nimbus/api/v1alpha1" ) -//+kubebuilder:rbac:groups="",resources=namespaces;serviceaccounts;configmaps,verbs=get;create;delete;update -//+kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=clusterroles;clusterrolebindings,verbs=get;create;delete;update -//+kubebuilder:rbac:groups="",resources=services,verbs=get;list - -func setupK8tlsEnv(ctx context.Context, cwnp v1alpha1.ClusterNimbusPolicy, scheme *runtime.Scheme, k8sClient client.Client) error { +func k8tlsEnvExist(ctx context.Context, k8sClient client.Client) bool { logger := log.FromContext(ctx) - // Retrieve the namespace ns := &corev1.Namespace{} - err := k8sClient.Get(ctx, client.ObjectKey{Name: NamespaceName}, ns) - if err != nil { - if errors.IsNotFound(err) { - logger.Error(err, "failed to fetch Namespace", "Namespace.Name", NamespaceName) - } - return err + if err := k8sClient.Get(ctx, client.ObjectKey{Name: K8tlsNamespace}, ns); err != nil { + logger.Error(err, "'k8tls' namespace not found") + return false } - cm := &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ConfigMap", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "fips-config", - Namespace: NamespaceName, - Labels: ns.Labels, - Annotations: ns.Annotations, - }, - Data: map[string]string{ - "fips-140-3.json": ` -{ - "TLS_versions": [ - { - "TLS_version": "TLSv1.0_1.1", - "cipher_suites": [ - { - "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA" - }, - { - "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA" - }, - { - "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA" - }, - { - "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA" - } - ] - }, - { - "TLS_version": "TLSv1.2", - "cipher_suites": [ - { - "cipher_suite": 
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA" - }, - { - "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA" - }, - { - "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA" - }, - { - "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA" - }, - { - "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" - }, - { - "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" - }, - { - "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" - }, - { - "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" - }, - { - "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_256_CCM" - }, - { - "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_128_CCM" - }, - { - "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8" - }, - { - "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8" - }, - { - "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384" - }, - { - "cipher_suite": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" - }, - { - "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384" - }, - { - "cipher_suite": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" - } - ] - }, - { - "TLS_version": "TLSv1.3", - "cipher_suites": [ - { - "cipher_suite": "TLS_AES_256_GCM_SHA384" - }, - { - "cipher_suite": "TLS_AES_128_GCM_SHA256" - }, - { - "cipher_suite": "TLS_AES_128_CCM_SHA256" - }, - { - "cipher_suite": "TLS_AES_128_CCM_8_SHA256" - } - ] - } - ] -}`, - }, - } - - objectMeta := metav1.ObjectMeta{ - Name: ns.Name, - Namespace: ns.Name, - Labels: ns.Labels, - Annotations: ns.Annotations, - } - - sa := &corev1.ServiceAccount{ - TypeMeta: metav1.TypeMeta{ - APIVersion: corev1.SchemeGroupVersion.String(), - Kind: "ServiceAccount", - }, - ObjectMeta: objectMeta, - } - - clusterRole := &rbacv1.ClusterRole{ - TypeMeta: metav1.TypeMeta{ - APIVersion: rbacv1.SchemeGroupVersion.String(), - Kind: "ClusterRole", - }, - ObjectMeta: objectMeta, - Rules: []rbacv1.PolicyRule{ - { - Verbs: []string{"get", "list"}, - APIGroups: []string{""}, - Resources: []string{"services"}, - }, - }, - } - - clusterRoleBinding := 
&rbacv1.ClusterRoleBinding{ - TypeMeta: metav1.TypeMeta{ - APIVersion: rbacv1.SchemeGroupVersion.String(), - Kind: "ClusterRoleBinding", - }, - ObjectMeta: objectMeta, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - APIGroup: "", - Name: sa.Name, - Namespace: sa.Namespace, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: clusterRole.Name, - }, - } - - objs := []client.Object{ns, cm, sa, clusterRole, clusterRoleBinding} - for idx := range objs { - objToCreate := objs[idx] - - // Don't set owner ref on namespace. In environments with configured Pod Security - // Standards labelling namespaces becomes a requirement. However, on deletion of - // CWNP a namespace with ownerReferences set also gets deleted. Since we need to - // keep the nimbus-k8tls-env namespace labeled, removing the ownerReferences - // prevents this deletion. - if idx != 0 { - if err := ctrl.SetControllerReference(&cwnp, objToCreate, scheme); err != nil { - return err - } - } - - var existingObj client.Object - - // Set the type of object, otherwise existingObj will always remain nil. 
- switch objToCreate.(type) { - case *corev1.Namespace: - existingObj = &corev1.Namespace{} - case *corev1.ConfigMap: - existingObj = &corev1.ConfigMap{} - case *corev1.ServiceAccount: - existingObj = &corev1.ServiceAccount{} - case *rbacv1.ClusterRole: - existingObj = &rbacv1.ClusterRole{} - case *rbacv1.ClusterRoleBinding: - existingObj = &rbacv1.ClusterRoleBinding{} - } - - err := k8sClient.Get(ctx, client.ObjectKeyFromObject(objToCreate), existingObj) - if err != nil && !errors.IsNotFound(err) { - return err - } - - objKind := strings.ToLower(objToCreate.GetObjectKind().GroupVersionKind().Kind) - if err != nil { - if errors.IsNotFound(err) { - if err := k8sClient.Create(ctx, objToCreate); err != nil { - return err - } - logger.Info(fmt.Sprintf("created %s/%s", objKind, objToCreate.GetName())) - } - } else { - objToCreate.SetResourceVersion(existingObj.GetResourceVersion()) - if err := k8sClient.Update(ctx, objToCreate); err != nil { - return err - } - logger.Info(fmt.Sprintf("configured %s/%s", objKind, objToCreate.GetName())) - } + sa := &corev1.ServiceAccount{} + if err := k8sClient.Get(ctx, client.ObjectKey{Name: k8tls, Namespace: K8tlsNamespace}, sa); err != nil { + logger.Error(err, "'k8tls' serviceaccount not found") + return false } - return nil + // If the required ClusterRole and ClusterRoleBinding resources don't exist, the + // job itself will describe/log that error. 
+ return true } diff --git a/pkg/adapter/nimbus-k8tls/manager/manager.go b/pkg/adapter/nimbus-k8tls/manager/manager.go index d765a44f..31598f73 100644 --- a/pkg/adapter/nimbus-k8tls/manager/manager.go +++ b/pkg/adapter/nimbus-k8tls/manager/manager.go @@ -29,9 +29,10 @@ import ( ) var ( - scheme = runtime.NewScheme() - k8sClient client.Client - NamespaceName = "nimbus-k8tls-env" + scheme = runtime.NewScheme() + k8sClient client.Client + K8tlsNamespace = "nimbus-k8tls-env" + k8tls = "k8tls" ) func init() { @@ -45,6 +46,8 @@ func init() { //+kubebuilder:rbac:groups=intent.security.nimbus.com,resources=clusternimbuspolicies,verbs=get;list;watch //+kubebuilder:rbac:groups=intent.security.nimbus.com,resources=clusternimbuspolicies/status,verbs=get;update;patch //+kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;create;delete;list;watch;update +//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;create;delete;update +//+kubebuilder:rbac:groups="",resources=namespaces;serviceaccounts,verbs=get func Run(ctx context.Context) { cwnpCh := make(chan string) @@ -108,14 +111,14 @@ func createOrUpdateCronJob(ctx context.Context, cwnpName string) { deleteDanglingCj(ctx, logger, cwnp) newCtx := context.WithValue(ctx, common.K8sClientKey, k8sClient) - newCtx = context.WithValue(newCtx, common.NamespaceNameKey, NamespaceName) + newCtx = context.WithValue(newCtx, common.NamespaceNameKey, K8tlsNamespace) cronJob, configMap := builder.BuildCronJob(newCtx, cwnp) if cronJob != nil { - if err := setupK8tlsEnv(ctx, cwnp, scheme, k8sClient); err != nil { - logger.Error(err, "failed to setup k8tls env") + if !k8tlsEnvExist(ctx, k8sClient) { return } + if configMap != nil { if err := createCm(ctx, cwnp, scheme, k8sClient, configMap); err != nil { logger.Error(err, "failed to create ConfigMap", "ConfigMap.Name", configMap.Name) @@ -130,7 +133,7 @@ func logCronJobsToDelete(ctx context.Context, deletedCwnp *unstructured.Unstruct logger := log.FromContext(ctx) var 
existingCronJobs batchv1.CronJobList - if err := k8sClient.List(ctx, &existingCronJobs, &client.ListOptions{Namespace: NamespaceName}); err != nil { + if err := k8sClient.List(ctx, &existingCronJobs, &client.ListOptions{Namespace: K8tlsNamespace}); err != nil { logger.Error(err, "failed to list Kubernetes CronJob") return } @@ -151,7 +154,7 @@ func logCronJobsToDelete(ctx context.Context, deletedCwnp *unstructured.Unstruct func deleteDanglingCj(ctx context.Context, logger logr.Logger, cwnp v1alpha1.ClusterNimbusPolicy) { var existingCronJobs batchv1.CronJobList - if err := k8sClient.List(ctx, &existingCronJobs, &client.ListOptions{Namespace: NamespaceName}); err != nil { + if err := k8sClient.List(ctx, &existingCronJobs, &client.ListOptions{Namespace: K8tlsNamespace}); err != nil { logger.Error(err, "failed to list Kubernetes CronJob for cleanup") return }