Add automatic RBAC creation for prometheus receiver #3459

Open · wants to merge 1 commit into base: main
16 changes: 16 additions & 0 deletions .chloggen/3078-add-prometheus-receiver-rbac.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
component: collector

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Create RBAC automatically for the Prometheus receiver

# One or more tracking issues related to the change
issues: [3078]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
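
For context on what "Create RBAC automatically for the Prometheus receiver" means in practice: a Prometheus receiver that discovers scrape targets through Kubernetes service discovery needs read access to the objects it discovers. The sketch below shows the kind of namespaced Role the operator could generate for that case; the role name, resource list, and verbs are illustrative assumptions, not the exact output of this PR.

```go
// Illustrative sketch only: the resources, verbs, and naming here are
// assumptions, not the exact RBAC produced by this PR.
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// promReceiverRole builds a namespaced Role granting the read access a
// Prometheus receiver using Kubernetes service discovery typically needs.
func promReceiverRole(collectorName, namespace string) *rbacv1.Role {
	return &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Name:      collectorName + "-prometheus-receiver", // hypothetical naming scheme
			Namespace: namespace,
		},
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{""},
			Resources: []string{"pods", "services", "endpoints"},
			Verbs:     []string{"get", "list", "watch"},
		}},
	}
}

func main() {
	role := promReceiverRole("my-otelcol", "observability")
	fmt.Println(role.Name, role.Rules[0].Resources)
}
```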
1 change: 1 addition & 0 deletions Makefile
@@ -210,6 +210,7 @@ add-rbac-permissions-to-operator: manifests kustomize
cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/cronjobs.yaml
cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/daemonsets.yaml
cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/events.yaml
cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/endpoints.yaml
cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/extensions.yaml
cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/namespaces.yaml
cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/namespaces-status.yaml
78 changes: 74 additions & 4 deletions apis/v1beta1/config.go
@@ -154,7 +154,7 @@ type Config struct {
}

// getRbacRulesForComponentKinds gets the RBAC Rules for the given ComponentKind(s).
func (c *Config) getRbacRulesForComponentKinds(logger logr.Logger, componentKinds ...ComponentKind) ([]rbacv1.PolicyRule, error) {
func (c *Config) getClusterRoleRbacRulesForComponentKinds(logger logr.Logger, componentKinds ...ComponentKind) ([]rbacv1.PolicyRule, error) {
var rules []rbacv1.PolicyRule
enabledComponents := c.GetEnabledComponents()
for _, componentKind := range componentKinds {
@@ -180,7 +180,7 @@ func (c *Config) getRbacRulesForComponentKind
for componentName := range enabledComponents[componentKind] {
// TODO: Clean up the naming here and make it simpler to use a retriever.
parser := retriever(componentName)
if parsedRules, err := parser.GetRBACRules(logger, cfg.Object[componentName]); err != nil {
if parsedRules, err := parser.GetClusterRoleRules(logger, cfg.Object[componentName]); err != nil {
return nil, err
} else {
rules = append(rules, parsedRules...)
@@ -190,6 +190,68 @@ func (c *Config) getRbacRulesForComponentKind
return rules, nil
}

// getRbacRolesForComponentKinds gets the RBAC Roles for the given ComponentKind(s).
func (c *Config) getRbacRolesForComponentKinds(logger logr.Logger, otelCollectorName string, componentKinds ...ComponentKind) ([]*rbacv1.Role, error) {
var roles []*rbacv1.Role
enabledComponents := c.GetEnabledComponents()
for _, componentKind := range componentKinds {
var retriever components.ParserRetriever
var cfg AnyConfig
switch componentKind {
case KindReceiver:
retriever = receivers.ReceiverFor
cfg = c.Receivers
case KindExporter:
continue
case KindProcessor:
continue
case KindExtension:
continue
}
for componentName := range enabledComponents[componentKind] {
// TODO: Clean up the naming here and make it simpler to use a retriever.
parser := retriever(componentName)
if parsedRoles, err := parser.GetRbacRoles(logger, otelCollectorName, cfg.Object[componentName]); err != nil {
return nil, err
} else {
roles = append(roles, parsedRoles...)
}
}
}
return roles, nil
}

// getRbacRoleBindingsForComponentKinds gets the RBAC RoleBindings for the given ComponentKind(s).
func (c *Config) getRbacRoleBindingsForComponentKinds(logger logr.Logger, serviceAccountName string, otelCollectorName string, otelCollectorNamespace string, componentKinds ...ComponentKind) ([]*rbacv1.RoleBinding, error) {
var roleBindings []*rbacv1.RoleBinding
enabledComponents := c.GetEnabledComponents()
for _, componentKind := range componentKinds {
var retriever components.ParserRetriever
var cfg AnyConfig
switch componentKind {
case KindReceiver:
retriever = receivers.ReceiverFor
cfg = c.Receivers
case KindExporter:
continue
case KindProcessor:
continue
case KindExtension:
continue
}
for componentName := range enabledComponents[componentKind] {
// TODO: Clean up the naming here and make it simpler to use a retriever.
parser := retriever(componentName)
if parsedRoleBindings, err := parser.GetRbacRoleBindings(logger, otelCollectorName, cfg.Object[componentName], serviceAccountName, otelCollectorNamespace); err != nil {
return nil, err
} else {
roleBindings = append(roleBindings, parsedRoleBindings...)
}
}
}
return roleBindings, nil
}
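
The two helpers above delegate to per-component parsers through GetRbacRoles and GetRbacRoleBindings. For orientation, a hedged sketch of the RoleBinding side of that parser contract follows; the concrete parser interface lives in internal/components and may differ in shape, and the object names here are placeholders.

```go
// Sketch of a hypothetical receiver parser implementing the RoleBinding side
// of the contract used above; the real interface in internal/components may
// differ.
package main

import (
	"fmt"

	"github.com/go-logr/logr"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type sketchPromParser struct{}

// GetRbacRoleBindings mirrors the call in getRbacRoleBindingsForComponentKinds:
// each generated Role is bound to the collector's ServiceAccount in the
// collector's namespace.
func (sketchPromParser) GetRbacRoleBindings(_ logr.Logger, otelCollectorName string, _ interface{}, serviceAccountName, namespace string) ([]*rbacv1.RoleBinding, error) {
	name := otelCollectorName + "-prometheus-receiver" // placeholder name
	return []*rbacv1.RoleBinding{{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Subjects: []rbacv1.Subject{{
			Kind:      "ServiceAccount",
			Name:      serviceAccountName,
			Namespace: namespace,
		}},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "Role",
			Name:     name,
		},
	}}, nil
}

func main() {
	rbs, err := sketchPromParser{}.GetRbacRoleBindings(logr.Discard(), "my-otelcol", nil, "my-otelcol-collector", "observability")
	fmt.Println(rbs[0].RoleRef.Name, err)
}
```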

// getPortsForComponentKinds gets the ports for the given ComponentKind(s).
func (c *Config) getPortsForComponentKinds(logger logr.Logger, componentKinds ...ComponentKind) ([]corev1.ServicePort, error) {
var ports []corev1.ServicePort
@@ -340,8 +402,16 @@ func (c *Config) GetEnvironmentVariables(logger logr.Logger) ([]corev1.EnvVar, e
return c.getEnvironmentVariablesForComponentKinds(logger, KindReceiver)
}

func (c *Config) GetAllRbacRules(logger logr.Logger) ([]rbacv1.PolicyRule, error) {
return c.getRbacRulesForComponentKinds(logger, KindReceiver, KindExporter, KindProcessor)
func (c *Config) GetAllClusterRoleRbacRules(logger logr.Logger) ([]rbacv1.PolicyRule, error) {
return c.getClusterRoleRbacRulesForComponentKinds(logger, KindReceiver, KindExporter, KindProcessor)
}

func (c *Config) GetAllRbacRoles(logger logr.Logger, otelCollectorName string) ([]*rbacv1.Role, error) {
return c.getRbacRolesForComponentKinds(logger, otelCollectorName, KindReceiver, KindExporter, KindProcessor)
}

func (c *Config) GetAllRbacRoleBindings(logger logr.Logger, serviceAccountName string, otelCollectorName string, otelCollectorNamespace string) ([]*rbacv1.RoleBinding, error) {
return c.getRbacRoleBindingsForComponentKinds(logger, serviceAccountName, otelCollectorName, otelCollectorNamespace, KindReceiver, KindExporter, KindProcessor)
}
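
A rough usage sketch of the new accessors from a manifest-building step is shown below. The surrounding types are simplified stand-ins for the operator's real Params and manifest pipeline, which differ.

```go
// Hedged usage sketch: how a manifest-building step might consume
// GetAllRbacRoles and GetAllRbacRoleBindings. The interface below is a
// stand-in for v1beta1.Config, not the operator's actual plumbing.
package main

import (
	"fmt"

	"github.com/go-logr/logr"
	rbacv1 "k8s.io/api/rbac/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// rbacConfig is a stand-in exposing only the two new accessors.
type rbacConfig interface {
	GetAllRbacRoles(logger logr.Logger, otelCollectorName string) ([]*rbacv1.Role, error)
	GetAllRbacRoleBindings(logger logr.Logger, serviceAccountName, otelCollectorName, otelCollectorNamespace string) ([]*rbacv1.RoleBinding, error)
}

// buildRBACManifests collects the namespaced RBAC objects for one collector.
func buildRBACManifests(cfg rbacConfig, logger logr.Logger, name, namespace, serviceAccount string) ([]client.Object, error) {
	var objs []client.Object
	roles, err := cfg.GetAllRbacRoles(logger, name)
	if err != nil {
		return nil, err
	}
	for _, r := range roles {
		objs = append(objs, r)
	}
	bindings, err := cfg.GetAllRbacRoleBindings(logger, serviceAccount, name, namespace)
	if err != nil {
		return nil, err
	}
	for _, rb := range bindings {
		objs = append(objs, rb)
	}
	fmt.Printf("generated %d RBAC objects\n", len(objs))
	return objs, nil
}

func main() {}
```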

func (c *Config) ApplyDefaults(logger logr.Logger) error {
14 changes: 10 additions & 4 deletions controllers/common.go
@@ -158,16 +158,22 @@ func reconcileDesiredObjects(ctx context.Context, kubeClient client.Client, logg
"object_kind", desired.GetObjectKind(),
)
if isNamespaceScoped(desired) {
if setErr := ctrl.SetControllerReference(owner, desired, scheme); setErr != nil {
l.Error(setErr, "failed to set controller owner reference to desired")
errs = append(errs, setErr)
continue
switch desired.(type) {
case *rbacv1.Role, *rbacv1.RoleBinding:
l.Info("skipping setting controller reference for role or rolebinding")
default:
if setErr := ctrl.SetControllerReference(owner, desired, scheme); setErr != nil {
l.Error(setErr, "failed to set controller owner reference to desired")
errs = append(errs, setErr)
continue
}
}
}
// existing is an object the controller runtime will hydrate for us
// we obtain the existing object by deep copying the desired object because it's the most convenient way
existing := desired.DeepCopyObject().(client.Object)
mutateFn := manifests.MutateFuncFor(existing, desired)

var op controllerutil.OperationResult
crudErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
result, createOrUpdateErr := ctrl.CreateOrUpdate(ctx, kubeClient, existing, mutateFn)
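
The type switch above skips SetControllerReference for Roles and RoleBindings because, as the config.go comments note, they can live in a different namespace than the OpenTelemetryCollector, and Kubernetes does not allow cross-namespace owner references; cleanup for them relies on label-based lookup instead of garbage collection. The guard below is an illustrative restatement of that rule, not the operator's code.

```go
// Hedged sketch of the rule behind the type switch: Kubernetes forbids
// cross-namespace owner references, and generated Roles / RoleBindings may
// live outside the collector's namespace.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// canUseOwnerReference is an illustrative guard: an owner reference is only
// safe when the object is neither a Role nor a RoleBinding and shares the
// owner's namespace.
func canUseOwnerReference(owner, desired client.Object) bool {
	switch desired.(type) {
	case *rbacv1.Role, *rbacv1.RoleBinding:
		return false
	}
	return owner.GetNamespace() == desired.GetNamespace()
}

func main() {
	owner := &corev1.ConfigMap{}
	owner.Namespace = "observability"
	rb := &rbacv1.RoleBinding{}
	rb.Namespace = "workloads"
	fmt.Println(canUseOwnerReference(owner, rb)) // false: RoleBindings are excluded
}
```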
36 changes: 28 additions & 8 deletions controllers/opentelemetrycollector_controller.go
@@ -60,6 +60,8 @@ var (
ownedClusterObjectTypes = []client.Object{
&rbacv1.ClusterRole{},
&rbacv1.ClusterRoleBinding{},
&rbacv1.Role{},
&rbacv1.RoleBinding{},
}
)

@@ -91,14 +93,14 @@ func (r *OpenTelemetryCollectorReconciler) findOtelOwnedObjects(ctx context.Cont
client.InNamespace(params.OtelCol.Namespace),
client.MatchingFields{resourceOwnerKey: params.OtelCol.Name},
}
rbacObjectsFound := false
for _, objectType := range ownedObjectTypes {
var objs map[types.UID]client.Object
objs, err := getList(ctx, r, objectType, listOpts...)
if err != nil {
return nil, err
}
for uid, object := range objs {
ownedObjects[uid] = object
}

// save Collector ConfigMaps into a separate slice, we need to do additional filtering on them
switch objectType.(type) {
case *corev1.ConfigMap:
@@ -110,8 +112,20 @@ func (r *OpenTelemetryCollectorReconciler) findOtelOwnedObjects(ctx context.Cont
configMap := object.(*corev1.ConfigMap)
collectorConfigMaps = append(collectorConfigMaps, configMap)
}
case *rbacv1.ClusterRoleBinding, *rbacv1.ClusterRole, *rbacv1.RoleBinding, *rbacv1.Role:
if params.Config.CreateRBACPermissions() == rbac.Available && !rbacObjectsFound {
objs, err = r.findRBACObjects(ctx, params)
if err != nil {
return nil, err
}
rbacObjectsFound = true
}
default:
}

for uid, object := range objs {
ownedObjects[uid] = object
}
}
// at this point we don't know if the most recent ConfigMap will still be the most recent after reconciliation, or
// if a new one will be created. We keep one additional ConfigMap to account for this. The next reconciliation that
@@ -125,11 +139,15 @@ func (r *OpenTelemetryCollectorReconciler) findOtelOwnedObjects(ctx context.Cont
return ownedObjects, nil
}

// The cluster scope objects do not have owner reference.
func (r *OpenTelemetryCollectorReconciler) findClusterRoleObjects(ctx context.Context, params manifests.Params) (map[types.UID]client.Object, error) {
// findRBACObjects finds ClusterRoles, ClusterRoleBindings, Roles, and RoleBindings.
// Those objects do not have owner references.
// - ClusterRoles and ClusterRoleBindings cannot have owner references
// - Roles and RoleBindings can exist in a different namespace than the OpenTelemetryCollector
//
// Users might switch off the RBAC creation feature on the operator which should remove existing RBAC.
func (r *OpenTelemetryCollectorReconciler) findRBACObjects(ctx context.Context, params manifests.Params) (map[types.UID]client.Object, error) {
ownedObjects := map[types.UID]client.Object{}
// Remove cluster roles and bindings.
// Users might switch off the RBAC creation feature on the operator which should remove existing RBAC.

listOpsCluster := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(
manifestutils.SelectorLabels(params.OtelCol.ObjectMeta, collector.ComponentOpenTelemetryCollector)),
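
Because these RBAC objects carry no owner reference, findRBACObjects discovers them through the selector labels the operator stamps on everything it creates (the hunk is truncated above). A hedged sketch of that kind of lookup for RoleBindings follows; the label keys and values are illustrative, since the operator derives its real selector from manifestutils.SelectorLabels.

```go
// Hedged sketch of label-based discovery for RBAC objects that have no owner
// reference. The label keys/values are illustrative assumptions.
package main

import (
	"context"
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// findCollectorRoleBindings lists RoleBindings in all namespaces that carry
// the collector's selector labels.
func findCollectorRoleBindings(ctx context.Context, c client.Client, collectorName, collectorNamespace string) ([]rbacv1.RoleBinding, error) {
	sel := labels.SelectorFromSet(labels.Set{
		// Illustrative label keys; the real ones follow the operator's conventions.
		"app.kubernetes.io/managed-by": "opentelemetry-operator",
		"app.kubernetes.io/instance":   collectorNamespace + "." + collectorName,
	})
	list := &rbacv1.RoleBindingList{}
	// No namespace option: search every namespace the client can see.
	if err := c.List(ctx, list, &client.ListOptions{LabelSelector: sel}); err != nil {
		return nil, err
	}
	fmt.Printf("found %d role bindings\n", len(list.Items))
	return list.Items, nil
}

func main() {}
```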
@@ -356,6 +374,8 @@ func (r *OpenTelemetryCollectorReconciler) GetOwnedResourceTypes() []client.Obje
if r.config.CreateRBACPermissions() == rbac.Available {
ownedResources = append(ownedResources, &rbacv1.ClusterRole{})
ownedResources = append(ownedResources, &rbacv1.ClusterRoleBinding{})
ownedResources = append(ownedResources, &rbacv1.Role{})
ownedResources = append(ownedResources, &rbacv1.RoleBinding{})
}

if featuregate.PrometheusOperatorIsAvailable.IsEnabled() && r.config.PrometheusCRAvailability() == prometheus.Available {
@@ -375,7 +395,7 @@ const collectorFinalizer = "opentelemetrycollector.opentelemetry.io/finalizer"
func (r *OpenTelemetryCollectorReconciler) finalizeCollector(ctx context.Context, params manifests.Params) error {
// The cluster scope objects do not have owner reference. They need to be deleted explicitly
if params.Config.CreateRBACPermissions() == rbac.Available {
objects, err := r.findClusterRoleObjects(ctx, params)
objects, err := r.findRBACObjects(ctx, params)
if err != nil {
return err
}
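
The finalizer path above now reuses findRBACObjects, but the rest of finalizeCollector is truncated in this diff. The sketch below only illustrates, under assumption, what explicit cleanup of such unowned objects generally looks like; the PR's actual logic may differ in details such as error aggregation.

```go
// Hedged sketch of explicit cleanup for objects without owner references; the
// operator's actual finalizer logic is truncated above and may differ.
package main

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteOwnedRBAC deletes every object previously found via label selection,
// tolerating objects that are already gone.
func deleteOwnedRBAC(ctx context.Context, c client.Client, objects map[types.UID]client.Object) error {
	for _, obj := range objects {
		if err := c.Delete(ctx, obj); err != nil && !apierrors.IsNotFound(err) {
			return err
		}
	}
	return nil
}

func main() {}
```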
124 changes: 124 additions & 0 deletions controllers/opentelemetrycollector_controller_test.go
@@ -0,0 +1,124 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package controllers

import (
"context"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/log/zap"

"github.com/open-telemetry/opentelemetry-operator/apis/v1beta1"
"github.com/open-telemetry/opentelemetry-operator/internal/config"
)

func TestReconcile(t *testing.T) {
logger := zap.New()
ctx := context.Background()

scheme := runtime.NewScheme()
require.NoError(t, v1beta1.AddToScheme(scheme))
require.NoError(t, corev1.AddToScheme(scheme))

tests := []struct {
name string
existingState []runtime.Object
expectedResult ctrl.Result
expectedError bool
}{
{
name: "collector not found",
existingState: []runtime.Object{},
expectedResult: ctrl.Result{},
expectedError: false,
},
{
name: "unmanaged collector",
existingState: []runtime.Object{
&v1beta1.OpenTelemetryCollector{
ObjectMeta: metav1.ObjectMeta{
Name: "test-collector",
Namespace: "default",
},
Spec: v1beta1.OpenTelemetryCollectorSpec{
OpenTelemetryCommonFields: v1beta1.OpenTelemetryCommonFields{
ManagementState: v1beta1.ManagementStateUnmanaged,
},
},
},
},
expectedResult: ctrl.Result{},
expectedError: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
client := fake.NewClientBuilder().
WithScheme(scheme).
WithRuntimeObjects(tt.existingState...).
Build()

r := &OpenTelemetryCollectorReconciler{
Client: client,
log: logger,
scheme: scheme,
config: config.New(),
recorder: record.NewFakeRecorder(100),
}

result, err := r.Reconcile(ctx, ctrl.Request{})

if tt.expectedError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
assert.Equal(t, tt.expectedResult, result)
})
}
}

func TestNewReconciler(t *testing.T) {
scheme := runtime.NewScheme()
client := fake.NewClientBuilder().WithScheme(scheme).Build()
recorder := record.NewFakeRecorder(100)
logger := zap.New()
cfg := config.New()

params := Params{
Client: client,
Recorder: recorder,
Scheme: scheme,
Log: logger,
Config: cfg,
}

r := NewReconciler(params)

assert.Equal(t, client, r.Client)
assert.Equal(t, recorder, r.recorder)
assert.Equal(t, scheme, r.scheme)
assert.Equal(t, logger, r.log)
assert.Equal(t, cfg, r.config)
}