diff --git a/systemtest/src/main/java/io/strimzi/systemtest/performance/utils/UserOperatorPerformanceUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/performance/utils/UserOperatorPerformanceUtils.java
index 9652e03463..6ffdee371f 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/performance/utils/UserOperatorPerformanceUtils.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/performance/utils/UserOperatorPerformanceUtils.java
@@ -87,7 +87,7 @@ public static List<KafkaUser> getListOfKafkaUsers(final TestStorage testStorage,
         for (int i = startPointer; i < endPointer; i++) {
             if (userAuthType.equals(UserAuthType.Tls)) {
                 usersList.add(
-                    KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getClusterName(), userName + "-" + i)
+                    KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), userName + "-" + i, testStorage.getClusterName())
                         .editOrNewSpec()
                             .withAuthorization(usersAcl)
                         .endSpec()
@@ -95,7 +95,7 @@ public static List<KafkaUser> getListOfKafkaUsers(final TestStorage testStorage,
                 );
             } else {
                 usersList.add(
-                    KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), testStorage.getClusterName(), userName + "-" + i)
+                    KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), userName + "-" + i, testStorage.getClusterName())
                         .editOrNewSpec()
                             .withAuthorization(usersAcl)
                         .endSpec()
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaBridgeTemplates.java b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaBridgeTemplates.java
index 48b4a6612b..0df72a7285 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaBridgeTemplates.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaBridgeTemplates.java
@@ -4,38 +4,32 @@
  */
 package io.strimzi.systemtest.templates.crd;

-import io.strimzi.api.kafka.model.bridge.KafkaBridge;
 import io.strimzi.api.kafka.model.bridge.KafkaBridgeBuilder;
-import io.strimzi.systemtest.TestConstants;
-import io.strimzi.systemtest.resources.ResourceManager;
-import io.strimzi.test.TestUtils;

 public class KafkaBridgeTemplates {

     private KafkaBridgeTemplates() {}

-    public static KafkaBridgeBuilder kafkaBridge(String name, String bootstrap, int kafkaBridgeReplicas) {
-        return kafkaBridge(name, name, bootstrap, kafkaBridgeReplicas);
-    }
-
-    public static KafkaBridgeBuilder kafkaBridge(String name, String clusterName, String bootstrap, int kafkaBridgeReplicas) {
-        KafkaBridge kafkaBridge = getKafkaBridgeFromYaml(TestConstants.PATH_TO_KAFKA_BRIDGE_CONFIG);
-        return defaultKafkaBridge(kafkaBridge, name, clusterName, bootstrap, kafkaBridgeReplicas);
-    }
+    private final static int DEFAULT_HTTP_PORT = 8080;

-    public static KafkaBridgeBuilder kafkaBridgeWithCors(String name, String bootstrap, int kafkaBridgeReplicas, String allowedCorsOrigin, String allowedCorsMethods) {
-        return kafkaBridgeWithCors(name, name, bootstrap, kafkaBridgeReplicas, allowedCorsOrigin, allowedCorsMethods);
+    public static KafkaBridgeBuilder kafkaBridge(
+        String namespaceName,
+        String bridgeName,
+        String bootstrap,
+        int kafkaBridgeReplicas
+    ) {
+        return defaultKafkaBridge(namespaceName, bridgeName, bootstrap, kafkaBridgeReplicas);
     }

-    public static KafkaBridgeBuilder kafkaBridgeWithCors(String name, String clusterName, String bootstrap,
-                                                         int kafkaBridgeReplicas, String allowedCorsOrigin,
-                                                         String allowedCorsMethods) {
-        KafkaBridge kafkaBridge = getKafkaBridgeFromYaml(TestConstants.PATH_TO_KAFKA_BRIDGE_CONFIG);
-
-        KafkaBridgeBuilder kafkaBridgeBuilder =
defaultKafkaBridge(kafkaBridge, name, clusterName, bootstrap, kafkaBridgeReplicas); - - kafkaBridgeBuilder + public static KafkaBridgeBuilder kafkaBridgeWithCors( + String namespaceName, + String bridgeName, + String bootstrap, + int kafkaBridgeReplicas, + String allowedCorsOrigin, + String allowedCorsMethods + ) { + return defaultKafkaBridge(namespaceName, bridgeName, bootstrap, kafkaBridgeReplicas) .editSpec() .editHttp() .withNewCors() @@ -44,39 +38,40 @@ public static KafkaBridgeBuilder kafkaBridgeWithCors(String name, String cluster .endCors() .endHttp() .endSpec(); - - return kafkaBridgeBuilder; } - public static KafkaBridgeBuilder kafkaBridgeWithMetrics(String name, String clusterName, String bootstrap) { - return kafkaBridgeWithMetrics(name, clusterName, bootstrap, 1); - } - - public static KafkaBridgeBuilder kafkaBridgeWithMetrics(String name, String clusterName, String bootstrap, int kafkaBridgeReplicas) { - KafkaBridge kafkaBridge = getKafkaBridgeFromYaml(TestConstants.PATH_TO_KAFKA_BRIDGE_CONFIG); - - return defaultKafkaBridge(kafkaBridge, name, clusterName, bootstrap, kafkaBridgeReplicas) + public static KafkaBridgeBuilder kafkaBridgeWithMetrics( + String namespaceName, + String bridgeName, + String bootstrap, + int kafkaBridgeReplicas + ) { + return defaultKafkaBridge(namespaceName, bridgeName, bootstrap, kafkaBridgeReplicas) .editSpec() .withEnableMetrics(true) .endSpec(); } - private static KafkaBridgeBuilder defaultKafkaBridge(KafkaBridge kafkaBridge, String name, String kafkaClusterName, String bootstrap, int kafkaBridgeReplicas) { - return new KafkaBridgeBuilder(kafkaBridge) + private static KafkaBridgeBuilder defaultKafkaBridge( + String namespaceName, + String bridgeName, + String bootstrap, + int kafkaBridgeReplicas + ) { + return new KafkaBridgeBuilder() .withNewMetadata() - .withName(name) - .withNamespace(ResourceManager.kubeClient().getNamespace()) + .withName(bridgeName) + .withNamespace(namespaceName) .endMetadata() - .editSpec() + .withNewSpec() .withBootstrapServers(bootstrap) .withReplicas(kafkaBridgeReplicas) .withNewInlineLogging() .addToLoggers("bridge.root.logger", "DEBUG") .endInlineLogging() + .withNewHttp() + .withPort(DEFAULT_HTTP_PORT) + .endHttp() .endSpec(); } - - private static KafkaBridge getKafkaBridgeFromYaml(String yamlPath) { - return TestUtils.configFromYaml(yamlPath, KafkaBridge.class); - } } diff --git a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaConnectTemplates.java b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaConnectTemplates.java index 3d951d5c54..7a1b786321 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaConnectTemplates.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaConnectTemplates.java @@ -6,8 +6,8 @@ import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import io.fabric8.kubernetes.api.model.ConfigMap; +import io.fabric8.kubernetes.api.model.ConfigMapBuilder; import io.strimzi.api.kafka.model.common.CertSecretSourceBuilder; -import io.strimzi.api.kafka.model.connect.KafkaConnect; import io.strimzi.api.kafka.model.connect.KafkaConnectBuilder; import io.strimzi.api.kafka.model.connect.KafkaConnectResources; import io.strimzi.api.kafka.model.connect.build.DockerOutput; @@ -25,51 +25,70 @@ import java.util.Random; -import static io.strimzi.test.k8s.KubeClusterResource.kubeClient; - public class KafkaConnectTemplates { private static final Logger LOGGER = LogManager.getLogger(KafkaConnectTemplates.class); - private 
KafkaConnectTemplates() {} - - public static KafkaConnectBuilder kafkaConnect(String name, final String namespaceName, String clusterName, int kafkaConnectReplicas, String pathToConnectConfig) { - KafkaConnect kafkaConnect = getKafkaConnectFromYaml(pathToConnectConfig); - return defaultKafkaConnect(kafkaConnect, namespaceName, name, clusterName, kafkaConnectReplicas); - } + private static final String METRICS_CONNECT_CONFIG_MAP_SUFFIX = "-connect-metrics"; + private static final String CONFIG_MAP_KEY = "metrics-config.yml"; - public static KafkaConnectBuilder kafkaConnect(String name, final String namespaceName, String clusterName, int kafkaConnectReplicas) { - return kafkaConnect(name, namespaceName, clusterName, kafkaConnectReplicas, TestConstants.PATH_TO_KAFKA_CONNECT_CONFIG); - } + private KafkaConnectTemplates() {} - public static KafkaConnectBuilder kafkaConnect(String name, final String namespaceName, int kafkaConnectReplicas) { - return kafkaConnect(name, namespaceName, name, kafkaConnectReplicas, TestConstants.PATH_TO_KAFKA_CONNECT_CONFIG); + public static KafkaConnectBuilder kafkaConnect( + final String namespaceName, + final String kafkaConnectClusterName, + final String kafkaClusterName, + final int kafkaConnectReplicas + ) { + return defaultKafkaConnect(namespaceName, kafkaConnectClusterName, kafkaClusterName, kafkaConnectReplicas); } - public static KafkaConnectBuilder kafkaConnectWithMetrics(String name, String namespaceName, int kafkaConnectReplicas) { - return kafkaConnectWithMetrics(name, namespaceName, name, kafkaConnectReplicas); + public static KafkaConnectBuilder kafkaConnect( + final String namespaceName, + final String kafkaClusterName, + final int kafkaConnectReplicas + ) { + return kafkaConnect(namespaceName, kafkaClusterName, kafkaClusterName, kafkaConnectReplicas); } - public static KafkaConnectBuilder kafkaConnectWithMetrics(String name, String namespaceName, String clusterName, int kafkaConnectReplicas) { - KafkaConnect kafkaConnect = getKafkaConnectFromYaml(TestConstants.PATH_TO_KAFKA_CONNECT_METRICS_CONFIG); - createOrReplaceConnectMetrics(namespaceName); - return defaultKafkaConnect(kafkaConnect, namespaceName, name, clusterName, kafkaConnectReplicas); + public static KafkaConnectBuilder kafkaConnectWithMetricsAndFileSinkPlugin( + final String namespaceName, + final String kafkaConnectClusterName, + final String kafkaClusterName, + final int replicas + ) { + return kafkaConnectWithFilePlugin(namespaceName, kafkaConnectClusterName, kafkaClusterName, replicas) + .editOrNewSpec() + .withNewJmxPrometheusExporterMetricsConfig() + .withNewValueFrom() + .withNewConfigMapKeyRef(CONFIG_MAP_KEY, getConfigMapName(kafkaConnectClusterName), false) + .endValueFrom() + .endJmxPrometheusExporterMetricsConfig() + .endSpec(); } - public static KafkaConnectBuilder kafkaConnectWithMetricsAndFileSinkPlugin(String name, String namespaceName, String clusterName, int replicas) { - createOrReplaceConnectMetrics(namespaceName); - return kafkaConnectWithFilePlugin(name, namespaceName, clusterName, replicas, TestConstants.PATH_TO_KAFKA_CONNECT_METRICS_CONFIG); + public static ConfigMap connectMetricsConfigMap(String namespaceName, String kafkaConnectClusterName) { + return new ConfigMapBuilder(TestUtils.configMapFromYaml(TestConstants.PATH_TO_KAFKA_CONNECT_METRICS_CONFIG, "connect-metrics")) + .editOrNewMetadata() + .withNamespace(namespaceName) + .withName(getConfigMapName(kafkaConnectClusterName)) + .endMetadata() + .build(); } - private static void 
createOrReplaceConnectMetrics(String namespaceName) { - ConfigMap metricsCm = TestUtils.configMapFromYaml(TestConstants.PATH_TO_KAFKA_CONNECT_METRICS_CONFIG, "connect-metrics"); - kubeClient().createConfigMapInNamespace(namespaceName, metricsCm); + private static String getConfigMapName(String kafkaConnectClusterName) { + return kafkaConnectClusterName + METRICS_CONNECT_CONFIG_MAP_SUFFIX; } - private static KafkaConnectBuilder defaultKafkaConnect(KafkaConnect kafkaConnect, final String namespaceName, String name, String kafkaClusterName, int kafkaConnectReplicas) { - return new KafkaConnectBuilder(kafkaConnect) + private static KafkaConnectBuilder defaultKafkaConnect( + final String namespaceName, + String kafkaConnectClusterName, + String kafkaClusterName, + int kafkaConnectReplicas + ) { + return new KafkaConnectBuilder() .withNewMetadata() - .withName(name) + .withName(kafkaConnectClusterName) .withNamespace(namespaceName) .endMetadata() .editOrNewSpec() @@ -77,47 +96,51 @@ private static KafkaConnectBuilder defaultKafkaConnect(KafkaConnect kafkaConnect .withBootstrapServers(KafkaResources.tlsBootstrapAddress(kafkaClusterName)) .withReplicas(kafkaConnectReplicas) .withNewTls() - .withTrustedCertificates(new CertSecretSourceBuilder().withSecretName(kafkaClusterName + "-cluster-ca-cert").withCertificate("ca.crt").build()) + .withTrustedCertificates( + new CertSecretSourceBuilder() + .withSecretName(KafkaResources.clusterCaCertificateSecretName(kafkaClusterName)) + .withCertificate("ca.crt") + .build() + ) .endTls() - .addToConfig("group.id", KafkaConnectResources.componentName(name)) - .addToConfig("offset.storage.topic", KafkaConnectResources.configStorageTopicOffsets(name)) - .addToConfig("config.storage.topic", KafkaConnectResources.metricsAndLogConfigMapName(name)) - .addToConfig("status.storage.topic", KafkaConnectResources.configStorageTopicStatus(name)) + .addToConfig("group.id", KafkaConnectResources.componentName(kafkaConnectClusterName)) + .addToConfig("offset.storage.topic", KafkaConnectResources.configStorageTopicOffsets(kafkaConnectClusterName)) + .addToConfig("config.storage.topic", KafkaConnectResources.metricsAndLogConfigMapName(kafkaConnectClusterName)) + .addToConfig("status.storage.topic", KafkaConnectResources.configStorageTopicStatus(kafkaConnectClusterName)) + .addToConfig("config.storage.replication.factor", "-1") + .addToConfig("offset.storage.replication.factor", "-1") + .addToConfig("status.storage.replication.factor", "-1") .withNewInlineLogging() .addToLoggers("connect.root.logger.level", "DEBUG") .endInlineLogging() .endSpec(); } - public static KafkaConnectBuilder kafkaConnectWithFilePlugin(String clusterName, String namespaceName, int replicas) { - return kafkaConnectWithFilePlugin(clusterName, namespaceName, clusterName, replicas); - } - - public static KafkaConnectBuilder kafkaConnectWithFilePlugin(String name, String namespaceName, String clusterName, int replicas) { - return kafkaConnectWithFilePlugin(name, namespaceName, clusterName, replicas, TestConstants.PATH_TO_KAFKA_CONNECT_CONFIG); + public static KafkaConnectBuilder kafkaConnectWithFilePlugin(String namespaceName, String kafkaClusterName, int replicas) { + return kafkaConnectWithFilePlugin(namespaceName, kafkaClusterName, kafkaClusterName, replicas); } /** * Method for creating the KafkaConnect builder with File plugin - using the KafkaConnect build feature. 
- * @param name Name for the KafkaConnect resource * @param namespaceName namespace, where the KafkaConnect resource will be deployed - * @param clusterName name of the Kafka cluster + * @param kafkaConnectClusterName Name for the KafkaConnect resource + * @param kafkaClusterName name of the Kafka cluster * @param replicas number of KafkaConnect replicas * @return KafkaConnect builder with File plugin */ - public static KafkaConnectBuilder kafkaConnectWithFilePlugin(String name, String namespaceName, String clusterName, int replicas, String pathToConnectConfig) { - return addFileSinkPluginOrImage(namespaceName, kafkaConnect(name, namespaceName, clusterName, replicas, pathToConnectConfig)); + public static KafkaConnectBuilder kafkaConnectWithFilePlugin(String namespaceName, String kafkaConnectClusterName, String kafkaClusterName, int replicas) { + return addFileSinkPluginOrImage(namespaceName, kafkaConnect(namespaceName, kafkaConnectClusterName, kafkaClusterName, replicas)); } /** * Method for adding Connect Build with file-sink plugin to the Connect spec or set Connect's image in case that * the image is set in `CONNECT_IMAGE_WITH_FILE_SINK_PLUGIN` env. variable * @param namespaceName namespace for output registry - * @param connectBuilder builder of the Connect resource + * @param kafkaConnectBuilder builder of the Connect resource * @return updated Connect resource in builder */ @SuppressFBWarnings("DMI_RANDOM_USED_ONLY_ONCE") - public static KafkaConnectBuilder addFileSinkPluginOrImage(String namespaceName, KafkaConnectBuilder connectBuilder) { + public static KafkaConnectBuilder addFileSinkPluginOrImage(String namespaceName, KafkaConnectBuilder kafkaConnectBuilder) { if (!KubeClusterResource.getInstance().isMicroShift() && Environment.CONNECT_IMAGE_WITH_FILE_SINK_PLUGIN.isEmpty()) { final Plugin fileSinkPlugin = new PluginBuilder() .withName("file-plugin") @@ -130,7 +153,7 @@ public static KafkaConnectBuilder addFileSinkPluginOrImage(String namespaceName, final String imageFullPath = Environment.getImageOutputRegistry(namespaceName, TestConstants.ST_CONNECT_BUILD_IMAGE_NAME, String.valueOf(new Random().nextInt(Integer.MAX_VALUE))); - return connectBuilder + return kafkaConnectBuilder .editOrNewSpec() .editOrNewBuild() .withPlugins(fileSinkPlugin) @@ -144,7 +167,7 @@ public static KafkaConnectBuilder addFileSinkPluginOrImage(String namespaceName, LOGGER.info("Using {} image from {} env variable", Environment.CONNECT_IMAGE_WITH_FILE_SINK_PLUGIN, Environment.CONNECT_IMAGE_WITH_FILE_SINK_PLUGIN_ENV); - return connectBuilder + return kafkaConnectBuilder .editOrNewSpec() .withImage(Environment.CONNECT_IMAGE_WITH_FILE_SINK_PLUGIN) .endSpec(); @@ -166,8 +189,4 @@ public static DockerOutput dockerOutput(String imageName) { return dockerOutputBuilder.build(); } - - private static KafkaConnect getKafkaConnectFromYaml(String yamlPath) { - return TestUtils.configFromYaml(yamlPath, KafkaConnect.class); - } } diff --git a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaConnectorTemplates.java b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaConnectorTemplates.java index 709d230a58..eee176b342 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaConnectorTemplates.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaConnectorTemplates.java @@ -4,52 +4,57 @@ */ package io.strimzi.systemtest.templates.crd; -import io.strimzi.api.kafka.model.connector.KafkaConnector; import 
io.strimzi.api.kafka.model.connector.KafkaConnectorBuilder; import io.strimzi.operator.common.model.Labels; -import io.strimzi.systemtest.TestConstants; -import io.strimzi.systemtest.resources.ResourceManager; -import io.strimzi.test.TestUtils; public class KafkaConnectorTemplates { - private KafkaConnectorTemplates() {} + private static final int DEFAULT_MAX_TASKS = 2; - public static KafkaConnectorBuilder kafkaConnector(String name) { - return kafkaConnector(name, name, 2); - } + private KafkaConnectorTemplates() {} - public static KafkaConnectorBuilder kafkaConnector(String name, int maxTasks) { - return kafkaConnector(name, name, maxTasks); + public static KafkaConnectorBuilder kafkaConnector(String namespaceName, String kafkaConnectClusterName) { + return kafkaConnector(namespaceName, kafkaConnectClusterName, DEFAULT_MAX_TASKS); } - public static KafkaConnectorBuilder kafkaConnector(String name, String clusterName) { - return kafkaConnector(name, clusterName, 2); + public static KafkaConnectorBuilder kafkaConnector(String namespaceName, String kafkaConnectClusterName, int maxTasks) { + return kafkaConnector(namespaceName, kafkaConnectClusterName, kafkaConnectClusterName, maxTasks); } - public static KafkaConnectorBuilder kafkaConnector(String name, String clusterName, int maxTasks) { - KafkaConnector kafkaConnector = getKafkaConnectorFromYaml(TestConstants.PATH_TO_KAFKA_CONNECTOR_CONFIG); - return defaultKafkaConnector(kafkaConnector, name, clusterName, maxTasks); + public static KafkaConnectorBuilder kafkaConnector( + String namespaceName, + String connectorName, + String kafkaConnectClusterName + ) { + return kafkaConnector(namespaceName, connectorName, kafkaConnectClusterName, DEFAULT_MAX_TASKS); } - public static KafkaConnectorBuilder defaultKafkaConnector(String name, String clusterName, int maxTasks) { - KafkaConnector kafkaConnector = getKafkaConnectorFromYaml(TestConstants.PATH_TO_KAFKA_CONNECTOR_CONFIG); - return defaultKafkaConnector(kafkaConnector, name, clusterName, maxTasks); + public static KafkaConnectorBuilder kafkaConnector( + String namespaceName, + String connectorName, + String kafkaConnectClusterName, + int maxTasks + ) { + return defaultKafkaConnector(namespaceName, connectorName, kafkaConnectClusterName, maxTasks); } - public static KafkaConnectorBuilder defaultKafkaConnector(KafkaConnector kafkaConnector, String name, String kafkaConnectClusterName, int maxTasks) { - return new KafkaConnectorBuilder(kafkaConnector) - .editOrNewMetadata() - .withName(name) - .withNamespace(ResourceManager.kubeClient().getNamespace()) + private static KafkaConnectorBuilder defaultKafkaConnector( + String namespaceName, + String connectorName, + String kafkaConnectClusterName, + int maxTasks + ) { + return new KafkaConnectorBuilder() + .withNewMetadata() + .withName(connectorName) + .withNamespace(namespaceName) .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, kafkaConnectClusterName) .endMetadata() .editOrNewSpec() .withTasksMax(maxTasks) + .withClassName("org.apache.kafka.connect.file.FileStreamSourceConnector") + .addToConfig("file", "/opt/kafka/LICENSE") + .addToConfig("topic", "my-topic") .endSpec(); } - - private static KafkaConnector getKafkaConnectorFromYaml(String yamlPath) { - return TestUtils.configFromYaml(yamlPath, KafkaConnector.class); - } } diff --git a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaMirrorMaker2Templates.java b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaMirrorMaker2Templates.java index 00add12a9f..79de1f2694 
100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaMirrorMaker2Templates.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaMirrorMaker2Templates.java @@ -5,91 +5,158 @@ package io.strimzi.systemtest.templates.crd; import io.fabric8.kubernetes.api.model.ConfigMap; +import io.fabric8.kubernetes.api.model.ConfigMapBuilder; import io.fabric8.kubernetes.api.model.Quantity; import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder; import io.strimzi.api.kafka.model.common.CertSecretSourceBuilder; import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.mirrormaker2.KafkaMirrorMaker2; import io.strimzi.api.kafka.model.mirrormaker2.KafkaMirrorMaker2Builder; import io.strimzi.api.kafka.model.mirrormaker2.KafkaMirrorMaker2ClusterSpec; import io.strimzi.api.kafka.model.mirrormaker2.KafkaMirrorMaker2ClusterSpecBuilder; import io.strimzi.systemtest.Environment; import io.strimzi.systemtest.TestConstants; -import io.strimzi.systemtest.resources.ResourceManager; import io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils; import io.strimzi.test.TestUtils; -import static io.strimzi.test.k8s.KubeClusterResource.kubeClient; - public class KafkaMirrorMaker2Templates { private KafkaMirrorMaker2Templates() {} + private static final String METRICS_MM2_CONFIG_MAP_SUFFIX = "-mm2-metrics"; + private static final String CONFIG_MAP_KEY = "metrics-config.yml"; + public static KafkaMirrorMaker2Builder kafkaMirrorMaker2(TestStorage testStorage, int kafkaMirrorMaker2Replicas, boolean tlsListener) { - return kafkaMirrorMaker2(testStorage.getClusterName(), testStorage.getTargetClusterName(), testStorage.getSourceClusterName(), kafkaMirrorMaker2Replicas, tlsListener); + return kafkaMirrorMaker2(testStorage.getNamespaceName(), testStorage.getClusterName(), testStorage.getSourceClusterName(), testStorage.getTargetClusterName(), kafkaMirrorMaker2Replicas, tlsListener); } - public static KafkaMirrorMaker2Builder kafkaMirrorMaker2(String name, String targetClusterName, String sourceClusterName, int kafkaMirrorMaker2Replicas, boolean tlsListener) { - KafkaMirrorMaker2 kafkaMirrorMaker2 = getKafkaMirrorMaker2FromYaml(TestConstants.PATH_TO_KAFKA_MIRROR_MAKER_2_CONFIG); - return defaultKafkaMirrorMaker2(kafkaMirrorMaker2, name, targetClusterName, sourceClusterName, kafkaMirrorMaker2Replicas, tlsListener); + public static KafkaMirrorMaker2Builder kafkaMirrorMaker2( + String namespaceName, + String kafkaMirrorMaker2Name, + String sourceKafkaClusterName, + String targetKafkaClusterName, + int kafkaMirrorMaker2Replicas, + boolean tlsListener + ) { + return defaultKafkaMirrorMaker2(namespaceName, kafkaMirrorMaker2Name, sourceKafkaClusterName, targetKafkaClusterName, kafkaMirrorMaker2Replicas, tlsListener); } - public static KafkaMirrorMaker2Builder kafkaMirrorMaker2WithMetrics(String namespaceName, String name, String targetClusterName, String sourceClusterName, int kafkaMirrorMaker2Replicas, String sourceNs, String targetNs) { - KafkaMirrorMaker2 kafkaMirrorMaker2 = getKafkaMirrorMaker2FromYaml(TestConstants.PATH_TO_KAFKA_MIRROR_MAKER_2_METRICS_CONFIG); - ConfigMap metricsCm = TestUtils.configMapFromYaml(TestConstants.PATH_TO_KAFKA_MIRROR_MAKER_2_METRICS_CONFIG, "mirror-maker-2-metrics"); - kubeClient().createConfigMapInNamespace(namespaceName, metricsCm); - return defaultKafkaMirrorMaker2(kafkaMirrorMaker2, name, targetClusterName, sourceClusterName, kafkaMirrorMaker2Replicas, false, 
sourceNs, targetNs); + public static KafkaMirrorMaker2Builder kafkaMirrorMaker2WithMetrics( + String namespaceName, + String mm2name, + String sourceKafkaClusterName, + String targetKafkaClusterName, + int kafkaMirrorMaker2Replicas, + String sourceNs, + String targetNs + ) { + return defaultKafkaMirrorMaker2(namespaceName, mm2name, sourceKafkaClusterName, targetKafkaClusterName, kafkaMirrorMaker2Replicas, false, sourceNs, targetNs) + .editOrNewSpec() + .withNewJmxPrometheusExporterMetricsConfig() + .withNewValueFrom() + .withNewConfigMapKeyRef(CONFIG_MAP_KEY, getConfigMapName(mm2name), false) + .endValueFrom() + .endJmxPrometheusExporterMetricsConfig() + .editFirstMirror() + .withNewHeartbeatConnector() + .addToConfig("checkpoints.topic.replication.factor", -1) + .endHeartbeatConnector() + .endMirror() + .endSpec(); } - private static KafkaMirrorMaker2Builder defaultKafkaMirrorMaker2(KafkaMirrorMaker2 kafkaMirrorMaker2, String name, String kafkaTargetClusterName, String kafkaSourceClusterName, int kafkaMirrorMaker2Replicas, boolean tlsListener) { - return defaultKafkaMirrorMaker2(kafkaMirrorMaker2, name, kafkaTargetClusterName, kafkaSourceClusterName, kafkaMirrorMaker2Replicas, tlsListener, null, null); + public static ConfigMap mirrorMaker2MetricsConfigMap(String namespaceName, String kafkaMirrorMaker2Name) { + return new ConfigMapBuilder(TestUtils.configMapFromYaml(TestConstants.PATH_TO_KAFKA_MIRROR_MAKER_2_METRICS_CONFIG, "mirror-maker-2-metrics")) + .editOrNewMetadata() + .withNamespace(namespaceName) + .withName(getConfigMapName(kafkaMirrorMaker2Name)) + .endMetadata() + .build(); } - private static KafkaMirrorMaker2Builder defaultKafkaMirrorMaker2(KafkaMirrorMaker2 kafkaMirrorMaker2, String name, String kafkaTargetClusterName, String kafkaSourceClusterName, int kafkaMirrorMaker2Replicas, boolean tlsListener, String sourceNs, String targetNs) { + private static String getConfigMapName(String kafkaMirrorMaker2Name) { + return kafkaMirrorMaker2Name + METRICS_MM2_CONFIG_MAP_SUFFIX; + } + + private static KafkaMirrorMaker2Builder defaultKafkaMirrorMaker2( + String namespaceName, + String kafkaMirrorMaker2Name, + String sourceKafkaClusterName, + String targetKafkaClusterName, + int kafkaMirrorMaker2Replicas, + boolean tlsListener + ) { + return defaultKafkaMirrorMaker2(namespaceName, kafkaMirrorMaker2Name, sourceKafkaClusterName, targetKafkaClusterName, kafkaMirrorMaker2Replicas, tlsListener, null, null); + } + + private static KafkaMirrorMaker2Builder defaultKafkaMirrorMaker2( + String namespaceName, + String kafkaMirrorMaker2Name, + String sourceKafkaClusterName, + String targetKafkaClusterName, + int kafkaMirrorMaker2Replicas, + boolean tlsListener, + String sourceNs, + String targetNs + ) { KafkaMirrorMaker2ClusterSpec targetClusterSpec = new KafkaMirrorMaker2ClusterSpecBuilder() - .withAlias(kafkaTargetClusterName) - .withBootstrapServers(targetNs == null ? KafkaResources.plainBootstrapAddress(kafkaTargetClusterName) : KafkaUtils.namespacedPlainBootstrapAddress(kafkaTargetClusterName, targetNs)) + .withAlias(targetKafkaClusterName) + .withBootstrapServers(targetNs == null ? 
KafkaResources.plainBootstrapAddress(targetKafkaClusterName) : KafkaUtils.namespacedPlainBootstrapAddress(targetKafkaClusterName, targetNs)) .addToConfig("config.storage.replication.factor", -1) .addToConfig("offset.storage.replication.factor", -1) .addToConfig("status.storage.replication.factor", -1) .build(); KafkaMirrorMaker2ClusterSpec sourceClusterSpec = new KafkaMirrorMaker2ClusterSpecBuilder() - .withAlias(kafkaSourceClusterName) - .withBootstrapServers(sourceNs == null ? KafkaResources.plainBootstrapAddress(kafkaSourceClusterName) : KafkaUtils.namespacedPlainBootstrapAddress(kafkaSourceClusterName, sourceNs)) + .withAlias(sourceKafkaClusterName) + .withBootstrapServers(sourceNs == null ? KafkaResources.plainBootstrapAddress(sourceKafkaClusterName) : KafkaUtils.namespacedPlainBootstrapAddress(sourceKafkaClusterName, sourceNs)) .build(); if (tlsListener) { targetClusterSpec = new KafkaMirrorMaker2ClusterSpecBuilder(targetClusterSpec) - .withBootstrapServers(targetNs == null ? KafkaResources.tlsBootstrapAddress(kafkaTargetClusterName) : KafkaUtils.namespacedTlsBootstrapAddress(kafkaTargetClusterName, targetNs)) + .withBootstrapServers(targetNs == null ? KafkaResources.tlsBootstrapAddress(targetKafkaClusterName) : KafkaUtils.namespacedTlsBootstrapAddress(targetKafkaClusterName, targetNs)) .withNewTls() - .withTrustedCertificates(new CertSecretSourceBuilder().withSecretName(KafkaResources.clusterCaCertificateSecretName(kafkaTargetClusterName)).withCertificate("ca.crt").build()) + .withTrustedCertificates(new CertSecretSourceBuilder().withSecretName(KafkaResources.clusterCaCertificateSecretName(targetKafkaClusterName)).withCertificate("ca.crt").build()) .endTls() .build(); sourceClusterSpec = new KafkaMirrorMaker2ClusterSpecBuilder(sourceClusterSpec) - .withBootstrapServers(sourceNs == null ? KafkaResources.tlsBootstrapAddress(kafkaSourceClusterName) : KafkaUtils.namespacedTlsBootstrapAddress(kafkaSourceClusterName, sourceNs)) + .withBootstrapServers(sourceNs == null ? 
KafkaResources.tlsBootstrapAddress(sourceKafkaClusterName) : KafkaUtils.namespacedTlsBootstrapAddress(sourceKafkaClusterName, sourceNs)) .withNewTls() - .withTrustedCertificates(new CertSecretSourceBuilder().withSecretName(KafkaResources.clusterCaCertificateSecretName(kafkaSourceClusterName)).withCertificate("ca.crt").build()) + .withTrustedCertificates(new CertSecretSourceBuilder().withSecretName(KafkaResources.clusterCaCertificateSecretName(sourceKafkaClusterName)).withCertificate("ca.crt").build()) .endTls() .build(); } - KafkaMirrorMaker2Builder kmm2b = new KafkaMirrorMaker2Builder(kafkaMirrorMaker2) + KafkaMirrorMaker2Builder kmm2b = new KafkaMirrorMaker2Builder() .withNewMetadata() - .withName(name) - .withNamespace(ResourceManager.kubeClient().getNamespace()) + .withName(kafkaMirrorMaker2Name) + .withNamespace(namespaceName) .endMetadata() .editOrNewSpec() .withVersion(Environment.ST_KAFKA_VERSION) .withReplicas(kafkaMirrorMaker2Replicas) - .withConnectCluster(kafkaTargetClusterName) + .withConnectCluster(targetKafkaClusterName) .withClusters(targetClusterSpec, sourceClusterSpec) - .editFirstMirror() - .withSourceCluster(kafkaSourceClusterName) - .withTargetCluster(kafkaTargetClusterName) + .addNewMirror() + .withSourceCluster(sourceKafkaClusterName) + .withTargetCluster(targetKafkaClusterName) + .withNewSourceConnector() + .withTasksMax(1) + .addToConfig("replication.factor", -1) + .addToConfig("offset-syncs.topic.replication.factor", -1) + .addToConfig("sync.topic.acls.enabled", "false") + .addToConfig("refresh.topics.interval.seconds", 600) + .endSourceConnector() + .withNewCheckpointConnector() + .withTasksMax(1) + .addToConfig("checkpoints.topic.replication.factor", -1) + .addToConfig("sync.group.offsets.enabled", "false") + .addToConfig("refresh.groups.interval.seconds", 600) + .endCheckpointConnector() + .withTopicsPattern(".*") + .withGroupsPattern(".*") .endMirror() .withNewInlineLogging() .addToLoggers("connect.root.logger.level", "DEBUG") @@ -108,8 +175,4 @@ private static KafkaMirrorMaker2Builder defaultKafkaMirrorMaker2(KafkaMirrorMake return kmm2b; } - - private static KafkaMirrorMaker2 getKafkaMirrorMaker2FromYaml(String yamlPath) { - return TestUtils.configFromYaml(yamlPath, KafkaMirrorMaker2.class); - } } diff --git a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaMirrorMakerTemplates.java b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaMirrorMakerTemplates.java index 469c903fcf..3f18eb1d05 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaMirrorMakerTemplates.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaMirrorMakerTemplates.java @@ -7,12 +7,8 @@ import io.fabric8.kubernetes.api.model.Quantity; import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder; import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.mirrormaker.KafkaMirrorMaker; import io.strimzi.api.kafka.model.mirrormaker.KafkaMirrorMakerBuilder; import io.strimzi.systemtest.Environment; -import io.strimzi.systemtest.TestConstants; -import io.strimzi.systemtest.resources.ResourceManager; -import io.strimzi.test.TestUtils; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.producer.ProducerConfig; @@ -22,22 +18,31 @@ public class KafkaMirrorMakerTemplates { private KafkaMirrorMakerTemplates() {} - public static KafkaMirrorMakerBuilder kafkaMirrorMaker(String name, String sourceBootstrapServer, String targetBootstrapServer, 
String groupId, int mirrorMakerReplicas, boolean tlsListener) {
-        KafkaMirrorMaker kafkaMirrorMaker = getKafkaMirrorMakerFromYaml(TestConstants.PATH_TO_KAFKA_MIRROR_MAKER_CONFIG);
-        return defaultKafkaMirrorMaker(kafkaMirrorMaker, name, sourceBootstrapServer, targetBootstrapServer, groupId, mirrorMakerReplicas, tlsListener);
+    public static KafkaMirrorMakerBuilder kafkaMirrorMaker(
+        String namespaceName,
+        String kafkaMirrorMaker1Name,
+        String sourceBootstrapServer,
+        String targetBootstrapServer,
+        String groupId,
+        int mirrorMakerReplicas,
+        boolean tlsListener
+    ) {
+        return defaultKafkaMirrorMaker(namespaceName, kafkaMirrorMaker1Name, sourceBootstrapServer, targetBootstrapServer, groupId, mirrorMakerReplicas, tlsListener);
     }

-    private static KafkaMirrorMakerBuilder defaultKafkaMirrorMaker(KafkaMirrorMaker kafkaMirrorMaker,
-                                                                   String name,
-                                                                   String sourceBootstrapServer,
-                                                                   String targetBootstrapServer,
-                                                                   String groupId,
-                                                                   int kafkaMirrorMakerReplicas,
-                                                                   boolean tlsListener) {
-        KafkaMirrorMakerBuilder kmmb = new KafkaMirrorMakerBuilder(kafkaMirrorMaker)
+    private static KafkaMirrorMakerBuilder defaultKafkaMirrorMaker(
+        String namespaceName,
+        String kafkaMirrorMaker1Name,
+        String sourceBootstrapServer,
+        String targetBootstrapServer,
+        String groupId,
+        int kafkaMirrorMakerReplicas,
+        boolean tlsListener
+    ) {
+        KafkaMirrorMakerBuilder kmmb = new KafkaMirrorMakerBuilder()
             .withNewMetadata()
-                .withName(name)
-                .withNamespace(ResourceManager.kubeClient().getNamespace())
+                .withName(kafkaMirrorMaker1Name)
+                .withNamespace(namespaceName)
             .endMetadata()
             .editSpec()
                 .withVersion(Environment.ST_KAFKA_VERSION)
@@ -70,8 +75,4 @@ private static KafkaMirrorMakerBuilder defaultKafkaMirrorMaker(KafkaMirrorMaker
         return kmmb;
     }
-
-    private static KafkaMirrorMaker getKafkaMirrorMakerFromYaml(String yamlPath) {
-        return TestUtils.configFromYaml(yamlPath, KafkaMirrorMaker.class);
-    }
 }
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaRebalanceTemplates.java b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaRebalanceTemplates.java
index 6bd2d43169..9e10157856 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaRebalanceTemplates.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaRebalanceTemplates.java
@@ -4,11 +4,7 @@
  */
 package io.strimzi.systemtest.templates.crd;

-import io.strimzi.api.kafka.model.rebalance.KafkaRebalance;
 import io.strimzi.api.kafka.model.rebalance.KafkaRebalanceBuilder;
-import io.strimzi.systemtest.TestConstants;
-import io.strimzi.systemtest.resources.ResourceManager;
-import io.strimzi.test.TestUtils;

 import java.util.HashMap;
 import java.util.Map;
@@ -17,25 +13,23 @@ public class KafkaRebalanceTemplates {

     private KafkaRebalanceTemplates() {}

-    public static KafkaRebalanceBuilder kafkaRebalance(String name) {
-        KafkaRebalance kafkaRebalance = getKafkaRebalanceFromYaml(TestConstants.PATH_TO_KAFKA_REBALANCE_CONFIG);
-        return defaultKafkaRebalance(kafkaRebalance, name);
+    public static KafkaRebalanceBuilder kafkaRebalance(String namespaceName, String kafkaClusterName) {
+        return defaultKafkaRebalance(namespaceName, kafkaClusterName);
     }

-    private static KafkaRebalanceBuilder defaultKafkaRebalance(KafkaRebalance kafkaRebalance, String name) {
+    private static KafkaRebalanceBuilder defaultKafkaRebalance(String namespaceName, String kafkaClusterName) {
         Map<String, String> kafkaRebalanceLabels = new HashMap<>();
-        kafkaRebalanceLabels.put("strimzi.io/cluster", name);
+
kafkaRebalanceLabels.put("strimzi.io/cluster", kafkaClusterName); - return new KafkaRebalanceBuilder(kafkaRebalance) + return new KafkaRebalanceBuilder() .editMetadata() - .withName(name) - .withNamespace(ResourceManager.kubeClient().getNamespace()) + .withName(kafkaClusterName) + .withNamespace(namespaceName) .withLabels(kafkaRebalanceLabels) - .endMetadata(); - } - - private static KafkaRebalance getKafkaRebalanceFromYaml(String yamlPath) { - return TestUtils.configFromYaml(yamlPath, KafkaRebalance.class); + .endMetadata() + // spec cannot be null, that's why the `withNewSpec` is used here. + .withNewSpec() + .endSpec(); } } diff --git a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaTemplates.java b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaTemplates.java index 0f7b8463cc..2d2ea2574b 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaTemplates.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaTemplates.java @@ -12,7 +12,6 @@ import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder; import io.strimzi.api.kafka.model.common.metrics.JmxPrometheusExporterMetrics; import io.strimzi.api.kafka.model.common.metrics.JmxPrometheusExporterMetricsBuilder; -import io.strimzi.api.kafka.model.kafka.Kafka; import io.strimzi.api.kafka.model.kafka.KafkaBuilder; import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; @@ -24,15 +23,10 @@ import java.util.Collections; -import static io.strimzi.systemtest.resources.ResourceManager.kubeClient; - public class KafkaTemplates { private KafkaTemplates() {} - private static final String PATH_TO_KAFKA_EPHEMERAL_KRAFT_EXAMPLE = TestConstants.PATH_TO_PACKAGING_EXAMPLES + "/kafka/kraft/kafka-ephemeral.yaml"; - private static final String PATH_TO_KAFKA_PERSISTENT_KRAFT_EXAMPLE = TestConstants.PATH_TO_PACKAGING_EXAMPLES + "/kafka/kraft/kafka.yaml"; - private static final String PATH_TO_KAFKA_PERSISTENT_NODE_POOLS_EXAMPLE = TestConstants.PATH_TO_PACKAGING_EXAMPLES + "/kafka/kafka-with-node-pools.yaml"; private static final String KAFKA_METRICS_CONFIG_REF_KEY = "kafka-metrics-config.yml"; private static final String ZOOKEEPER_METRICS_CONFIG_REF_KEY = "zookeeper-metrics-config.yml"; private static final String METRICS_KAFKA_CONFIG_MAP_SUFFIX = "-kafka-metrics"; @@ -42,70 +36,54 @@ private KafkaTemplates() {} // Kafka Ephemeral // ------------------------------------------------------------------------------------------- - public static KafkaBuilder kafkaEphemeral(String clusterName, int kafkaReplicas) { - return kafkaEphemeral(clusterName, kafkaReplicas, Math.min(kafkaReplicas, 3)); + public static KafkaBuilder kafkaEphemeral(String namespaceName, String kafkaClusterName, int kafkaReplicas) { + return kafkaEphemeral(namespaceName, kafkaClusterName, kafkaReplicas, Math.min(kafkaReplicas, 3)); } - public static KafkaBuilder kafkaEphemeral(String clusterName, int kafkaReplicas, int zookeeperReplicas) { + public static KafkaBuilder kafkaEphemeral(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { if (Environment.isKafkaNodePoolsEnabled()) { if (Environment.isKRaftModeEnabled()) { - return kafkaEphemeralKRaft(clusterName, kafkaReplicas); + return kafkaEphemeralKRaft(namespaceName, kafkaClusterName, kafkaReplicas); } - return kafkaEphemeralNodePools(clusterName, kafkaReplicas, zookeeperReplicas); + return kafkaEphemeralNodePools(namespaceName, 
kafkaClusterName, kafkaReplicas, zookeeperReplicas); } else { - return kafkaEphemeralWithoutNodePools(clusterName, kafkaReplicas, zookeeperReplicas); + return kafkaEphemeralWithoutNodePools(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas); } } - public static KafkaBuilder kafkaEphemeralWithoutNodePools(String clusterName, int kafkaReplicas, int zookeeperReplicas) { - Kafka kafka = getKafkaFromYaml(TestConstants.PATH_TO_KAFKA_EPHEMERAL_CONFIG, false); - return defaultKafkaWithoutNodePools(kafka, clusterName, kafkaReplicas, zookeeperReplicas); + public static KafkaBuilder kafkaEphemeralWithoutNodePools(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { + return defaultKafkaWithoutNodePools(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas); } - public static KafkaBuilder kafkaEphemeralNodePools(String clusterName, int kafkaReplicas, int zookeeperReplicas) { - Kafka kafka = getKafkaFromYaml(PATH_TO_KAFKA_PERSISTENT_NODE_POOLS_EXAMPLE, true); - - kafka.getSpec().getZookeeper().setStorage(null); - - return defaultKafkaNodePools(kafka, clusterName, kafkaReplicas, zookeeperReplicas) - .editSpec() - .editZookeeper() - // the NodePools (in ZK mode) example contains persistent storage for ZK, so we need to specify the - // ephemeral storage - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec(); + public static KafkaBuilder kafkaEphemeralNodePools(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { + return defaultKafkaNodePools(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas); } - public static KafkaBuilder kafkaEphemeralKRaft(String clusterName, int kafkaReplicas) { - Kafka kafka = getKafkaFromYaml(PATH_TO_KAFKA_EPHEMERAL_KRAFT_EXAMPLE, true); - return defaultKafkaKRaft(kafka, clusterName, kafkaReplicas); + public static KafkaBuilder kafkaEphemeralKRaft(String namespaceName, String kafkaClusterName, int kafkaReplicas) { + return defaultKafkaKRaft(namespaceName, kafkaClusterName, kafkaReplicas); } // ------------------------------------------------------------------------------------------- // Kafka Persistent // ------------------------------------------------------------------------------------------- - public static KafkaBuilder kafkaPersistent(String clusterName, int kafkaReplicas) { - return kafkaPersistent(clusterName, kafkaReplicas, Math.min(kafkaReplicas, 3)); + public static KafkaBuilder kafkaPersistent(String namespaceName, String kafkaClusterName, int kafkaReplicas) { + return kafkaPersistent(namespaceName, kafkaClusterName, kafkaReplicas, Math.min(kafkaReplicas, 3)); } - public static KafkaBuilder kafkaPersistent(String clusterName, int kafkaReplicas, int zookeeperReplicas) { + public static KafkaBuilder kafkaPersistent(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { if (Environment.isKafkaNodePoolsEnabled()) { if (Environment.isKRaftModeEnabled()) { - return kafkaPersistentKRaft(clusterName, kafkaReplicas); + return kafkaPersistentKRaft(namespaceName, kafkaClusterName, kafkaReplicas); } - return kafkaPersistentNodePools(clusterName, kafkaReplicas, zookeeperReplicas); + return kafkaPersistentNodePools(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas); } else { - return kafkaPersistentWithoutNodePools(clusterName, kafkaReplicas, zookeeperReplicas); + return kafkaPersistentWithoutNodePools(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas); } } - public 
static KafkaBuilder kafkaPersistentWithoutNodePools(String clusterName, int kafkaReplicas, int zookeeperReplicas) { - Kafka kafka = getKafkaFromYaml(TestConstants.PATH_TO_KAFKA_EPHEMERAL_CONFIG, false); - - return defaultKafkaWithoutNodePools(kafka, clusterName, kafkaReplicas, zookeeperReplicas) + public static KafkaBuilder kafkaPersistentWithoutNodePools(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { + return defaultKafkaWithoutNodePools(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas) .editSpec() .editKafka() .withNewPersistentClaimStorage() @@ -122,11 +100,8 @@ public static KafkaBuilder kafkaPersistentWithoutNodePools(String clusterName, i .endSpec(); } - public static KafkaBuilder kafkaPersistentNodePools(String clusterName, int kafkaReplicas, int zookeeperReplicas) { - Kafka kafka = getKafkaFromYaml(PATH_TO_KAFKA_PERSISTENT_NODE_POOLS_EXAMPLE, true); - kafka.getSpec().getZookeeper().setStorage(null); - - return defaultKafkaNodePools(kafka, clusterName, kafkaReplicas, zookeeperReplicas) + public static KafkaBuilder kafkaPersistentNodePools(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { + return defaultKafkaNodePools(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas) .editSpec() .editZookeeper() .withNewPersistentClaimStorage() @@ -137,23 +112,18 @@ public static KafkaBuilder kafkaPersistentNodePools(String clusterName, int kafk .endSpec(); } - public static KafkaBuilder kafkaPersistentKRaft(String clusterName, int kafkaReplicas) { - Kafka kafka = getKafkaFromYaml(PATH_TO_KAFKA_PERSISTENT_KRAFT_EXAMPLE, true); - return defaultKafkaKRaft(kafka, clusterName, kafkaReplicas); + public static KafkaBuilder kafkaPersistentKRaft(String namespaceName, String kafkaClusterName, int kafkaReplicas) { + return defaultKafkaKRaft(namespaceName, kafkaClusterName, kafkaReplicas); } // ------------------------------------------------------------------------------------------- // Kafka with metrics // ------------------------------------------------------------------------------------------- - public static KafkaBuilder kafkaWithMetrics(String namespaceName, String clusterName, int kafkaReplicas, int zookeeperReplicas) { - Kafka kafka = getKafkaFromYaml(TestConstants.PATH_TO_KAFKA_METRICS_CONFIG, false); - String configMapName = clusterName + METRICS_KAFKA_CONFIG_MAP_SUFFIX; + public static KafkaBuilder kafkaWithMetrics(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { + String configMapName = kafkaClusterName + METRICS_KAFKA_CONFIG_MAP_SUFFIX; - KafkaBuilder kafkaBuilder = defaultKafka(kafka, clusterName, kafkaReplicas, zookeeperReplicas) - .editMetadata() - .withNamespace(namespaceName) - .endMetadata() + KafkaBuilder kafkaBuilder = defaultKafka(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas) .editSpec() .withNewKafkaExporter() .endKafkaExporter() @@ -182,8 +152,8 @@ public static KafkaBuilder kafkaWithMetrics(String namespaceName, String cluster return kafkaBuilder; } - public static KafkaBuilder kafkaWithMetricsAndCruiseControlWithMetrics(String namespaceName, String clusterName, int kafkaReplicas, int zookeeperReplicas) { - String ccConfigMapName = clusterName + METRICS_CC_CONFIG_MAP_SUFFIX; + public static KafkaBuilder kafkaWithMetricsAndCruiseControlWithMetrics(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { + String ccConfigMapName = kafkaClusterName + 
METRICS_CC_CONFIG_MAP_SUFFIX; ConfigMapKeySelector cmks = new ConfigMapKeySelectorBuilder() .withName(ccConfigMapName) @@ -196,7 +166,7 @@ public static KafkaBuilder kafkaWithMetricsAndCruiseControlWithMetrics(String na .endValueFrom() .build(); - return kafkaWithMetrics(namespaceName, clusterName, kafkaReplicas, zookeeperReplicas) + return kafkaWithMetrics(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas) .editSpec() .withNewCruiseControl() .withMetricsConfig(jmxPrometheusExporterMetrics) @@ -210,8 +180,8 @@ public static KafkaBuilder kafkaWithMetricsAndCruiseControlWithMetrics(String na // ConfigMaps for Kafka with metrics // ------------------------------------------------------------------------------------------- - public static ConfigMap kafkaMetricsConfigMap(String namespaceName, String clusterName) { - String configMapName = clusterName + METRICS_KAFKA_CONFIG_MAP_SUFFIX; + public static ConfigMap kafkaMetricsConfigMap(String namespaceName, String kafkaClusterName) { + String configMapName = kafkaClusterName + METRICS_KAFKA_CONFIG_MAP_SUFFIX; ConfigMap kafkaMetricsCm = TestUtils.configMapFromYaml(TestConstants.PATH_TO_KAFKA_METRICS_CONFIG, "kafka-metrics"); @@ -223,8 +193,8 @@ public static ConfigMap kafkaMetricsConfigMap(String namespaceName, String clust .build(); } - public static ConfigMap cruiseControlMetricsConfigMap(String namespaceName, String clusterName) { - String configMapName = clusterName + METRICS_CC_CONFIG_MAP_SUFFIX; + public static ConfigMap cruiseControlMetricsConfigMap(String namespaceName, String kafkaClusterName) { + String configMapName = kafkaClusterName + METRICS_CC_CONFIG_MAP_SUFFIX; return new ConfigMapBuilder() .withNewMetadata() @@ -246,16 +216,14 @@ public static ConfigMap cruiseControlMetricsConfigMap(String namespaceName, Stri // Kafka with Cruise Control // ------------------------------------------------------------------------------------------- - public static KafkaBuilder kafkaWithCruiseControl(String clusterName, int kafkaReplicas, int zookeeperReplicas) { - Kafka kafka = getKafkaFromYaml(TestConstants.PATH_TO_KAFKA_CRUISE_CONTROL_CONFIG, false); - - return defaultKafka(kafka, clusterName, kafkaReplicas, zookeeperReplicas) + public static KafkaBuilder kafkaWithCruiseControl(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { + return defaultKafka(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas) .editSpec() .editKafka() .addToConfig("cruise.control.metrics.reporter.metrics.reporting.interval.ms", 5_000) .addToConfig("cruise.control.metrics.reporter.metadata.max.age.ms", 4_000) .endKafka() - .editCruiseControl() + .editOrNewCruiseControl() // the following configurations are set for better reliability and stability of CC related tests .addToConfig("max.active.user.tasks", 10) .addToConfig("metric.sampling.interval.ms", 5_000) @@ -265,10 +233,8 @@ public static KafkaBuilder kafkaWithCruiseControl(String clusterName, int kafkaR .endSpec(); } - public static KafkaBuilder kafkaWithCruiseControlTunedForFastModelGeneration(String clusterName, int kafkaReplicas, int zookeeperReplicas) { - Kafka kafka = KafkaTemplates.getKafkaFromYaml(TestConstants.PATH_TO_KAFKA_CRUISE_CONTROL_CONFIG, false); - - return KafkaTemplates.defaultKafka(kafka, clusterName, kafkaReplicas, zookeeperReplicas) + public static KafkaBuilder kafkaWithCruiseControlTunedForFastModelGeneration(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { + return 
defaultKafka(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas) .editSpec() .editKafka() .addToConfig("cruise.control.metrics.reporter.metrics.reporting.interval.ms", 5_000) @@ -276,7 +242,7 @@ public static KafkaBuilder kafkaWithCruiseControlTunedForFastModelGeneration(Str .addToConfig("cruise.control.metrics.topic.replication.factor", 1) .addToConfig("cruise.control.metrics.topic.min.insync.replicas", 1) .endKafka() - .editCruiseControl() + .editOrNewCruiseControl() .addToConfig("max.active.user.tasks", 10) .addToConfig("metric.sampling.interval.ms", 5_000) .addToConfig("cruise.control.metrics.reporter.metrics.reporting.interval.ms", 5_000) @@ -296,22 +262,22 @@ public static KafkaBuilder kafkaWithCruiseControlTunedForFastModelGeneration(Str // Kafka default templates // ------------------------------------------------------------------------------------------- - private static KafkaBuilder defaultKafka(Kafka kafka, String clusterName, int kafkaReplicas, int zookeeperReplicas) { + private static KafkaBuilder defaultKafka(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { if (Environment.isKafkaNodePoolsEnabled()) { if (Environment.isKRaftModeEnabled()) { - return defaultKafkaKRaft(kafka, clusterName, kafkaReplicas); + return defaultKafkaKRaft(namespaceName, kafkaClusterName, kafkaReplicas); } - return defaultKafkaNodePools(kafka, clusterName, kafkaReplicas, zookeeperReplicas); + return defaultKafkaNodePools(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas); } else { - return defaultKafkaWithoutNodePools(kafka, clusterName, kafkaReplicas, zookeeperReplicas); + return defaultKafkaWithoutNodePools(namespaceName, kafkaClusterName, kafkaReplicas, zookeeperReplicas); } } - private static KafkaBuilder defaultKafkaWithoutNodePools(Kafka kafka, String clusterName, int kafkaReplicas, int zookeeperReplicas) { - KafkaBuilder kb = new KafkaBuilder(kafka) + private static KafkaBuilder defaultKafkaWithoutNodePools(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { + KafkaBuilder kb = new KafkaBuilder() .withNewMetadata() - .withName(clusterName) - .withNamespace(kubeClient().getNamespace()) + .withName(kafkaClusterName) + .withNamespace(namespaceName) .endMetadata() .editSpec() .editKafka() @@ -323,15 +289,17 @@ private static KafkaBuilder defaultKafkaWithoutNodePools(Kafka kafka, String clu setDefaultConfigurationOfZookeeperKafka(kb, zookeeperReplicas); setDefaultLogging(kb, true); setMemoryRequestsAndLimitsIfNeeded(kb, true); + setKafkaEphemeralStorage(kb); + setZookeeperEphemeralStorage(kb); return kb; } - private static KafkaBuilder defaultKafkaNodePools(Kafka kafka, String clusterName, int kafkaReplicas, int zookeeperReplicas) { - KafkaBuilder kb = new KafkaBuilder(kafka) + private static KafkaBuilder defaultKafkaNodePools(String namespaceName, String kafkaClusterName, int kafkaReplicas, int zookeeperReplicas) { + KafkaBuilder kb = new KafkaBuilder() .withNewMetadata() - .withName(clusterName) - .withNamespace(kubeClient().getNamespace()) + .withName(kafkaClusterName) + .withNamespace(namespaceName) .addToAnnotations(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled") .endMetadata(); @@ -339,16 +307,16 @@ private static KafkaBuilder defaultKafkaNodePools(Kafka kafka, String clusterNam setDefaultConfigurationOfZookeeperKafka(kb, zookeeperReplicas); setDefaultLogging(kb, true); setMemoryRequestsAndLimitsIfNeeded(kb, true); - kb = removeFieldsNotRelatedToParticularMode(kb, true); + 
setZookeeperEphemeralStorage(kb); return kb; } - private static KafkaBuilder defaultKafkaKRaft(Kafka kafka, String clusterName, int kafkaReplicas) { - KafkaBuilder kb = new KafkaBuilder(kafka) + private static KafkaBuilder defaultKafkaKRaft(String namespaceName, String kafkaClusterName, int kafkaReplicas) { + KafkaBuilder kb = new KafkaBuilder() .withNewMetadata() - .withName(clusterName) - .withNamespace(kubeClient().getNamespace()) + .withName(kafkaClusterName) + .withNamespace(namespaceName) .addToAnnotations(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled") .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled") .endMetadata() @@ -361,7 +329,6 @@ private static KafkaBuilder defaultKafkaKRaft(Kafka kafka, String clusterName, i setDefaultSpecOfKafka(kb, kafkaReplicas); setDefaultLogging(kb, false); setMemoryRequestsAndLimitsIfNeeded(kb, false); - kb = removeFieldsNotRelatedToParticularMode(kb, false); return kb; } @@ -370,6 +337,26 @@ private static KafkaBuilder defaultKafkaKRaft(Kafka kafka, String clusterName, i // Application of defaults to the builders // ------------------------------------------------------------------------------------------- + private static void setKafkaEphemeralStorage(KafkaBuilder kafkaBuilder) { + kafkaBuilder + .editSpec() + .editOrNewKafka() + .withNewEphemeralStorage() + .endEphemeralStorage() + .endKafka() + .endSpec(); + } + + private static void setZookeeperEphemeralStorage(KafkaBuilder kafkaBuilder) { + kafkaBuilder + .editSpec() + .editOrNewZookeeper() + .withNewEphemeralStorage() + .endEphemeralStorage() + .endZookeeper() + .endSpec(); + } + private static void setDefaultConfigurationOfZookeeperKafka(KafkaBuilder kafkaBuilder, int zookeeperReplicas) { kafkaBuilder .editSpec() @@ -494,26 +481,4 @@ private static void setDefaultSpecOfKafka(KafkaBuilder kafkaBuilder, int kafkaRe .endKafka() .endSpec(); } - - private static KafkaBuilder removeFieldsNotRelatedToParticularMode(KafkaBuilder kafkaBuilder, boolean withZookeeper) { - Kafka kafka = kafkaBuilder.build(); - - // in case that we are using file that is not customized to usage of NodePools or KRaft, we need to remove all the - // fields here - if (!withZookeeper) { - kafka.getSpec().setZookeeper(null); - kafka.getSpec().getKafka().getConfig().remove("log.message.format.version"); - kafka.getSpec().getKafka().getConfig().remove("inter.broker.protocol.version"); - } - - kafka.getSpec().getKafka().setStorage(null); - kafka.getSpec().getKafka().setReplicas(null); - - return new KafkaBuilder(kafka); - } - - private static Kafka getKafkaFromYaml(String yamlPath, boolean containsNodePools) { - return containsNodePools ? 
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaTopicTemplates.java b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaTopicTemplates.java
index 275f2bd4ed..1f30dde182 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaTopicTemplates.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaTopicTemplates.java
@@ -4,57 +4,71 @@
  */
 package io.strimzi.systemtest.templates.crd;
 
-import io.strimzi.api.kafka.model.topic.KafkaTopic;
 import io.strimzi.api.kafka.model.topic.KafkaTopicBuilder;
 import io.strimzi.operator.common.model.Labels;
-import io.strimzi.systemtest.TestConstants;
 import io.strimzi.systemtest.storage.TestStorage;
-import io.strimzi.test.TestUtils;
 
 public class KafkaTopicTemplates {
 
     private KafkaTopicTemplates() {}
 
     public static KafkaTopicBuilder topic(TestStorage testStorage) {
-        return defaultTopic(testStorage.getClusterName(), testStorage.getTopicName(), 1, 1, 1, testStorage.getNamespaceName());
+        return defaultTopic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName(), 1, 1, 1);
     }
 
     public static KafkaTopicBuilder continuousTopic(TestStorage testStorage) {
-        return defaultTopic(testStorage.getClusterName(), testStorage.getContinuousTopicName(), 1, 1, 1, testStorage.getNamespaceName());
+        return defaultTopic(testStorage.getNamespaceName(), testStorage.getContinuousTopicName(), testStorage.getClusterName(), 1, 1, 1);
     }
 
-    public static KafkaTopicBuilder topic(String clusterName, String topicName, String topicNamespace) {
-        return defaultTopic(clusterName, topicName, 1, 1, 1, topicNamespace);
+    public static KafkaTopicBuilder topic(String namespaceName, String topicName, String kafkaClusterName) {
+        return defaultTopic(namespaceName, topicName, kafkaClusterName, 1, 1, 1);
     }
 
-    public static KafkaTopicBuilder topic(String clusterName, String topicName, int partitions, String topicNamespace) {
-        return defaultTopic(clusterName, topicName, partitions, 1, 1, topicNamespace);
+    public static KafkaTopicBuilder topic(String namespaceName, String topicName, String kafkaClusterName, int partitions) {
+        return defaultTopic(namespaceName, topicName, kafkaClusterName, partitions, 1, 1);
     }
 
-    public static KafkaTopicBuilder topic(String clusterName, String topicName, int partitions, int replicas, String topicNamespace) {
-        return defaultTopic(clusterName, topicName, partitions, replicas, replicas, topicNamespace);
+    public static KafkaTopicBuilder topic(
+        String namespaceName,
+        String topicName,
+        String kafkaClusterName,
+        int partitions,
+        int replicas
+    ) {
+        return defaultTopic(namespaceName, topicName, kafkaClusterName, partitions, replicas, replicas);
     }
 
-    public static KafkaTopicBuilder topic(String clusterName, String topicName, int partitions, int replicas, int minIsr, String topicNamespace) {
-        return defaultTopic(clusterName, topicName, partitions, replicas, minIsr, topicNamespace);
+    public static KafkaTopicBuilder topic(
+        String namespaceName,
+        String topicName,
+        String kafkaClusterName,
+        int partitions,
+        int replicas,
+        int minIsr
+    ) {
+        return defaultTopic(namespaceName, topicName, kafkaClusterName, partitions, replicas, minIsr);
     }
 
-    public static KafkaTopicBuilder defaultTopic(String clusterName, String topicName, int partitions, int replicas, int minIsr, String topicNamespace) {
-        KafkaTopic kafkaTopic = getKafkaTopicFromYaml(TestConstants.PATH_TO_KAFKA_TOPIC_CONFIG);
-        return new KafkaTopicBuilder(kafkaTopic)
+    public static KafkaTopicBuilder defaultTopic(
+        String namespaceName,
+        String topicName,
+        String kafkaClusterName,
+        int partitions,
+        int replicas,
+        int minIsr
+    ) {
+        return new KafkaTopicBuilder()
             .withNewMetadata()
                 .withName(topicName)
-                .withNamespace(topicNamespace)
-                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, clusterName)
+                .withNamespace(namespaceName)
+                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, kafkaClusterName)
             .endMetadata()
             .editSpec()
                 .withPartitions(partitions)
                 .withReplicas(replicas)
                 .addToConfig("min.insync.replicas", minIsr)
+                .addToConfig("retention.ms", 7200000)
+                .addToConfig("segment.bytes", 1073741824)
            .endSpec();
     }
-
-    private static KafkaTopic getKafkaTopicFromYaml(String yamlPath) {
-        return TestUtils.configFromYaml(yamlPath, KafkaTopic.class);
-    }
 }
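For reference, the reordered KafkaTopic overloads are used as shown below (a hypothetical call site, the namespace, topic and cluster names are placeholders). Partitions, replicas and min.insync.replicas now trail the three identifying strings, and the retention.ms and segment.bytes defaults are applied in code instead of being read from the removed YAML template:

    // Hypothetical usage of the new overloads; both delegate to defaultTopic(...).
    KafkaTopic basicTopic = KafkaTopicTemplates
            .topic("example-namespace", "example-topic", "example-cluster")           // 1 partition, 1 replica, minIsr 1
            .build();

    KafkaTopic tunedTopic = KafkaTopicTemplates
            .topic("example-namespace", "example-topic", "example-cluster", 12, 3, 2) // partitions, replicas, min.insync.replicas
            .build();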
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaUserTemplates.java b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaUserTemplates.java
index 6b1543d5f4..ea2788cbbf 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaUserTemplates.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaUserTemplates.java
@@ -14,11 +14,11 @@ public class KafkaUserTemplates {
     private KafkaUserTemplates() {}
 
     public static KafkaUserBuilder tlsUser(TestStorage testStorage) {
-        return tlsUser(testStorage.getNamespaceName(), testStorage.getClusterName(), testStorage.getUsername());
+        return tlsUser(testStorage.getNamespaceName(), testStorage.getUsername(), testStorage.getClusterName());
     }
 
-    public static KafkaUserBuilder tlsUser(String namespaceName, String clusterName, String name) {
-        return defaultUser(namespaceName, clusterName, name)
+    public static KafkaUserBuilder tlsUser(String namespaceName, String userName, String kafkaClusterName) {
+        return defaultUser(namespaceName, userName, kafkaClusterName)
             .withNewSpec()
                 .withNewKafkaUserTlsClientAuthentication()
                 .endKafkaUserTlsClientAuthentication()
@@ -26,31 +26,31 @@ public static KafkaUserBuilder tlsUser(String namespaceName, String clusterName,
     }
 
     public static KafkaUserBuilder scramShaUser(TestStorage testStorage) {
-        return scramShaUser(testStorage.getNamespaceName(), testStorage.getClusterName(), testStorage.getUsername());
+        return scramShaUser(testStorage.getNamespaceName(), testStorage.getUsername(), testStorage.getClusterName());
     }
 
-    public static KafkaUserBuilder scramShaUser(String namespaceName, String clusterName, String name) {
-        return defaultUser(namespaceName, clusterName, name)
+    public static KafkaUserBuilder scramShaUser(String namespaceName, String userName, String kafkaClusterName) {
+        return defaultUser(namespaceName, userName, kafkaClusterName)
             .withNewSpec()
                 .withNewKafkaUserScramSha512ClientAuthentication()
                 .endKafkaUserScramSha512ClientAuthentication()
             .endSpec();
     }
 
-    public static KafkaUserBuilder tlsExternalUser(final String namespaceName, final String clusterName, final String name) {
-        return defaultUser(namespaceName, clusterName, name)
+    public static KafkaUserBuilder tlsExternalUser(final String namespaceName, final String userName, final String kafkaClusterName) {
+        return defaultUser(namespaceName, userName, kafkaClusterName)
             .withNewSpec()
                 .withNewKafkaUserTlsExternalClientAuthentication()
                 .endKafkaUserTlsExternalClientAuthentication()
             .endSpec();
     }
 
-    public static KafkaUserBuilder defaultUser(String namespaceName, String clusterName, String name) {
+    public static KafkaUserBuilder defaultUser(String namespaceName, String userName, String kafkaClusterName) {
         return new KafkaUserBuilder()
             .withNewMetadata()
-                .withName(name)
+                .withName(userName)
                 .withNamespace(namespaceName)
-                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, clusterName)
+                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, kafkaClusterName)
             .endMetadata();
     }
 
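The KafkaUser templates follow the same reordering, with the user name now sitting between the namespace and the cluster name. Because all three parameters are plain Strings, the compiler cannot flag a swapped pair, which is why each call site below is updated individually. A hypothetical example with placeholder names:

    // Hypothetical call sites; the argument order is now (namespace, user, cluster).
    KafkaUser tlsUser = KafkaUserTemplates
            .tlsUser("example-namespace", "example-user", "example-cluster")
            .build();

    KafkaUser scramUser = KafkaUserTemplates
            .scramShaUser("example-namespace", "example-user", "example-cluster")
            .build();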
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/templates/kubernetes/NetworkPolicyTemplates.java b/systemtest/src/main/java/io/strimzi/systemtest/templates/kubernetes/NetworkPolicyTemplates.java
index 71abd26866..a397c9112d 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/templates/kubernetes/NetworkPolicyTemplates.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/templates/kubernetes/NetworkPolicyTemplates.java
@@ -9,13 +9,9 @@
 import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyBuilder;
 import io.strimzi.systemtest.TestConstants;
 import io.strimzi.systemtest.enums.DefaultNetworkPolicy;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
 
 public class NetworkPolicyTemplates {
 
-    private static final Logger LOGGER = LogManager.getLogger(NetworkPolicyTemplates.class);
-
     public static NetworkPolicyBuilder networkPolicyBuilder(String namespace, String name, LabelSelector labelSelector) {
         return new NetworkPolicyBuilder()
             .withApiVersion("networking.k8s.io/v1")
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaTopicScalabilityUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaTopicScalabilityUtils.java
index 90f39807eb..8d360b18e4 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaTopicScalabilityUtils.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaTopicScalabilityUtils.java
@@ -45,8 +45,8 @@ public static void createTopicsViaK8s(String namespaceName, String clusterName,
 
         for (int i = start; i < end; i++) {
             String currentTopicName = topicPrefix + "-" + i;
-            ResourceManager.getInstance().createResourceWithoutWait(KafkaTopicTemplates.topic(
-                clusterName, currentTopicName, numberOfPartitions, numberOfReplicas, minInSyncReplicas, namespaceName).build());
+            ResourceManager.getInstance().createResourceWithoutWait(KafkaTopicTemplates.topic(namespaceName, currentTopicName, clusterName,
+                numberOfPartitions, numberOfReplicas, minInSyncReplicas).build());
         }
     }
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeCorsST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeCorsST.java
index 81a8d9f284..839e398c48 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeCorsST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeCorsST.java
@@ -134,21 +134,21 @@ void beforeAll() {
                 KafkaNodePoolTemplates.controllerPoolPersistentStorage(suiteTestStorage.getNamespaceName(), suiteTestStorage.getControllerPoolName(), suiteTestStorage.getClusterName(), 3).build()
             )
         );
-        resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(suiteTestStorage.getClusterName(), 1, 3)
-            .editMetadata()
-                .withNamespace(suiteTestStorage.getNamespaceName())
-            .endMetadata()
-            .build());
+        resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(suiteTestStorage.getNamespaceName(), suiteTestStorage.getClusterName(), 1, 3).build());
 
         resourceManager.createResourceWithWait(ScraperTemplates.scraperPod(suiteTestStorage.getNamespaceName(),
suiteTestStorage.getScraperName()).build()); suiteTestStorage.addToTestStorage(TestConstants.SCRAPER_POD_KEY, kubeClient().listPodsByPrefixInName(suiteTestStorage.getNamespaceName(), suiteTestStorage.getScraperName()).get(0).getMetadata().getName()); - resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridgeWithCors(suiteTestStorage.getClusterName(), KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), - 1, ALLOWED_ORIGIN, null) - .editMetadata() - .withNamespace(suiteTestStorage.getNamespaceName()) - .endMetadata() - .build()); + resourceManager.createResourceWithWait( + KafkaBridgeTemplates.kafkaBridgeWithCors( + suiteTestStorage.getNamespaceName(), + suiteTestStorage.getClusterName(), + KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), + 1, + ALLOWED_ORIGIN, + null + ).build() + ); NetworkPolicyResource.allowNetworkPolicySettingsForBridgeScraper(suiteTestStorage.getNamespaceName(), suiteTestStorage.getScraperPodName(), KafkaBridgeResources.componentName(suiteTestStorage.getClusterName())); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java index ab6d14048e..1a7915a399 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java @@ -85,7 +85,7 @@ void testSendSimpleMessage() { .build(); // Create topic - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(suiteTestStorage.getClusterName(), testStorage.getTopicName(), testStorage.getNamespaceName()).build()); + resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); resourceManager.createResourceWithWait(kafkaBridgeClientJob.producerStrimziBridge()); ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), testStorage.getMessageCount()); @@ -103,7 +103,7 @@ void testSendSimpleMessage() { void testReceiveSimpleMessage() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(suiteTestStorage.getClusterName(), testStorage.getTopicName(), Environment.TEST_SUITE_NAMESPACE).build()); + resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); final BridgeClients kafkaBridgeClientJob = new BridgeClientsBuilder() .withConsumerName(testStorage.getConsumerName()) @@ -157,10 +157,7 @@ void testCustomAndUpdatedValues() { int updatedPeriodSeconds = 5; int updatedFailureThreshold = 1; - resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(bridgeName, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() + resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(Environment.TEST_SUITE_NAMESPACE, bridgeName, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1) .editSpec() .withNewTemplate() .withNewBridgeContainer() @@ -241,11 +238,7 @@ void testScaleBridgeToZero() { String bridgeName = "scaling-bridge-down"; - resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(bridgeName, 
KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() - .build()); + resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(Environment.TEST_SUITE_NAMESPACE, bridgeName, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1).build()); List bridgePods = kubeClient(Environment.TEST_SUITE_NAMESPACE).listPodNames(Labels.STRIMZI_CLUSTER_LABEL, bridgeName); String deploymentName = KafkaBridgeResources.componentName(bridgeName); @@ -269,11 +262,7 @@ void testScaleBridgeToZero() { void testScaleBridgeSubresource() { String bridgeName = "scaling-bridge-up"; - resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(bridgeName, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() - .build()); + resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(Environment.TEST_SUITE_NAMESPACE, bridgeName, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1).build()); int scaleTo = 4; long bridgeObsGen = KafkaBridgeResource.kafkaBridgeClient().inNamespace(Environment.TEST_SUITE_NAMESPACE).withName(bridgeName).get().getStatus().getObservedGeneration(); @@ -308,10 +297,7 @@ void testScaleBridgeSubresource() { void testConfigureDeploymentStrategy() { String bridgeName = "example-bridge"; - resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(bridgeName, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() + resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(Environment.TEST_SUITE_NAMESPACE, bridgeName, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1) .editSpec() .editOrNewTemplate() .editOrNewDeployment() @@ -360,11 +346,7 @@ void testCustomBridgeLabelsAreProperlySet() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String bridgeName = "bridge-" + testStorage.getClusterName(); - resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(bridgeName, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() - .build()); + resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(Environment.TEST_SUITE_NAMESPACE, bridgeName, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1).build()); // get service with custom labels final Service kafkaBridgeService = kubeClient().getService(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.serviceName(bridgeName)); @@ -418,18 +400,11 @@ void createClassResources() { ) ); // Deploy kafka - resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(suiteTestStorage.getClusterName(), 1, 1) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() - .build()); + resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(suiteTestStorage.getNamespaceName(), suiteTestStorage.getClusterName(), 1, 1).build()); // Deploy http bridge - resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(suiteTestStorage.getClusterName(), + resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(suiteTestStorage.getNamespaceName(), 
suiteTestStorage.getClusterName(), KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() .editSpec() .withNewConsumer() .addToConfig(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java index 3f8b1d8fb0..ca57d1ece3 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java @@ -55,7 +55,7 @@ void testSendSimpleMessageTlsScramSha() { .build(); // Create topic - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(suiteTestStorage.getClusterName(), testStorage.getTopicName(), testStorage.getNamespaceName()).build()); + resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); resourceManager.createResourceWithWait(kafkaBridgeClientJb.producerStrimziBridge()); ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), testStorage.getMessageCount()); @@ -77,7 +77,7 @@ void testReceiveSimpleMessageTlsScramSha() { .withConsumerName(testStorage.getConsumerName()) .build(); - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(suiteTestStorage.getClusterName(), testStorage.getTopicName(), Environment.TEST_SUITE_NAMESPACE).build()); + resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); resourceManager.createResourceWithWait(kafkaBridgeClientJb.consumerStrimziBridge()); // Send messages to Kafka @@ -106,10 +106,7 @@ void setUp() { ) ); // Deploy kafka - resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(suiteTestStorage.getClusterName(), 1, 1) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() + resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(suiteTestStorage.getNamespaceName(), suiteTestStorage.getClusterName(), 1, 1) .editSpec() .editKafka() .withListeners(new GenericKafkaListenerBuilder() @@ -124,11 +121,7 @@ void setUp() { .endSpec().build()); // Create Kafka user - KafkaUser scramShaUser = KafkaUserTemplates.scramShaUser(Environment.TEST_SUITE_NAMESPACE, suiteTestStorage.getClusterName(), suiteTestStorage.getUsername()) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() - .build(); + KafkaUser scramShaUser = KafkaUserTemplates.scramShaUser(suiteTestStorage.getNamespaceName(), suiteTestStorage.getUsername(), suiteTestStorage.getClusterName()).build(); resourceManager.createResourceWithWait(scramShaUser); @@ -143,11 +136,9 @@ void setUp() { certSecret.setSecretName(KafkaResources.clusterCaCertificateSecretName(suiteTestStorage.getClusterName())); // Deploy http bridge - resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(suiteTestStorage.getClusterName(), - KafkaResources.tlsBootstrapAddress(suiteTestStorage.getClusterName()), 1) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() + resourceManager.createResourceWithWait( + KafkaBridgeTemplates.kafkaBridge(suiteTestStorage.getNamespaceName(), + suiteTestStorage.getClusterName(), 
KafkaResources.tlsBootstrapAddress(suiteTestStorage.getClusterName()), 1) .editSpec() .withNewConsumer() .addToConfig(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") @@ -159,7 +150,9 @@ void setUp() { .withNewTls() .withTrustedCertificates(certSecret) .endTls() - .endSpec().build()); + .endSpec() + .build() + ); kafkaBridgeClientJob = new BridgeClientsBuilder() .withBootstrapAddress(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java index 04c4ccb6b4..7e1d6fa6c4 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java @@ -61,7 +61,7 @@ void testSendSimpleMessageTls() { .withProducerName(testStorage.getProducerName()) .build(); - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(suiteTestStorage.getClusterName(), testStorage.getTopicName(), testStorage.getNamespaceName()).build()); + resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); resourceManager.createResourceWithWait(kafkaBridgeClientJobProduce.producerStrimziBridge()); ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), testStorage.getMessageCount()); @@ -83,7 +83,7 @@ void testReceiveSimpleMessageTls() { .withConsumerName(testStorage.getConsumerName()) .build(); - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(suiteTestStorage.getClusterName(), testStorage.getTopicName(), testStorage.getNamespaceName()).build()); + resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); resourceManager.createResourceWithWait(kafkaBridgeClientJobConsume.consumerStrimziBridge()); @@ -163,10 +163,7 @@ private void testWeirdUsername(String weirdUserName, KafkaListenerAuthentication KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3, 1) - .editMetadata() - .withNamespace(testStorage.getNamespaceName()) - .endMetadata() + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 1) .editSpec() .editKafka() .withListeners(new GenericKafkaListenerBuilder() @@ -197,24 +194,13 @@ private void testWeirdUsername(String weirdUserName, KafkaListenerAuthentication // Create user if (auth.getType().equals(TestConstants.TLS_LISTENER_DEFAULT_NAME)) { - resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getClusterName(), weirdUserName) - .editMetadata() - .withNamespace(testStorage.getNamespaceName()) - .endMetadata() - .build()); + resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), weirdUserName, testStorage.getClusterName()).build()); } else { - resourceManager.createResourceWithWait(KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), testStorage.getClusterName(), weirdUserName) - .editMetadata() - .withNamespace(testStorage.getNamespaceName()) - .endMetadata() - .build()); + 
resourceManager.createResourceWithWait(KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), weirdUserName, testStorage.getClusterName()).build()); } // Deploy http bridge - resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(testStorage.getClusterName(), KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()), 1) - .editMetadata() - .withNamespace(testStorage.getNamespaceName()) - .endMetadata() + resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(testStorage.getNamespaceName(), testStorage.getClusterName(), KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()), 1) .withNewSpecLike(spec) .withBootstrapServers(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) .withNewHttp(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) @@ -259,10 +245,7 @@ void setUp() { KafkaNodePoolTemplates.controllerPoolPersistentStorage(suiteTestStorage.getNamespaceName(), suiteTestStorage.getControllerPoolName(), suiteTestStorage.getClusterName(), 1).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(suiteTestStorage.getClusterName(), 1, 1) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() + resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(Environment.TEST_SUITE_NAMESPACE, suiteTestStorage.getClusterName(), 1, 1) .editSpec() .editKafka() .withListeners(new GenericKafkaListenerBuilder() @@ -278,7 +261,7 @@ void setUp() { .build()); // Create Kafka user - KafkaUser tlsUser = KafkaUserTemplates.tlsUser(Environment.TEST_SUITE_NAMESPACE, suiteTestStorage.getClusterName(), suiteTestStorage.getUsername()).build(); + KafkaUser tlsUser = KafkaUserTemplates.tlsUser(Environment.TEST_SUITE_NAMESPACE, suiteTestStorage.getUsername(), suiteTestStorage.getClusterName()).build(); resourceManager.createResourceWithWait(tlsUser); // Initialize CertSecretSource with certificate and Secret names for consumer @@ -287,10 +270,7 @@ void setUp() { certSecret.setSecretName(KafkaResources.clusterCaCertificateSecretName(suiteTestStorage.getClusterName())); // Deploy http bridge - resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(suiteTestStorage.getClusterName(), KafkaResources.tlsBootstrapAddress(suiteTestStorage.getClusterName()), 1) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() + resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(Environment.TEST_SUITE_NAMESPACE, suiteTestStorage.getClusterName(), KafkaResources.tlsBootstrapAddress(suiteTestStorage.getClusterName()), 1) .editSpec() .withNewConsumer() .addToConfig(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java index 57f7f76ccf..1b7b57fa6e 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java @@ -158,7 +158,7 @@ void testBuildFailsWithWrongChecksumOfArtifact() { resourceManager.createResourceWithWait(ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build()); - resourceManager.createResourceWithoutWait(KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), 1) + 
resourceManager.createResourceWithoutWait(KafkaConnectTemplates.kafkaConnect(testStorage.getNamespaceName(), testStorage.getClusterName(), suiteTestStorage.getClusterName(), 1) .editMetadata() .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true") .endMetadata() @@ -219,8 +219,8 @@ void testBuildWithJarTgzAndZip() { // this test also testing push into Docker output final String imageName = getImageNameForTestCase(); - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), testStorage.getNamespaceName()).build()); - resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), 1) + resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName()).build()); + resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getNamespaceName(), testStorage.getClusterName(), suiteTestStorage.getClusterName(), 1) .editMetadata() .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true") .endMetadata() @@ -243,10 +243,7 @@ void testBuildWithJarTgzAndZip() { connectorConfig.put("topics", testStorage.getTopicName()); connectorConfig.put("level", "INFO"); - resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getClusterName()) - .editMetadata() - .withNamespace(testStorage.getNamespaceName()) - .endMetadata() + resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), testStorage.getClusterName()) .editOrNewSpec() .withClassName(TestConstants.ECHO_SINK_CLASS_NAME) .withConfig(connectorConfig) @@ -280,7 +277,7 @@ void testPushIntoImageStream() { kubeClient().getClient().adapt(OpenShiftClient.class).imageStreams().inNamespace(testStorage.getNamespaceName()).resource(imageStream).create(); - resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), 1) + resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getNamespaceName(), testStorage.getClusterName(), suiteTestStorage.getClusterName(), 1) .editMetadata() .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true") .endMetadata() @@ -323,9 +320,9 @@ void testUpdateConnectWithAnotherPlugin() { .build(); String topicName = KafkaTopicUtils.generateRandomNameOfTopic(); - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), topicName, testStorage.getNamespaceName()).build()); + resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), topicName, testStorage.getClusterName()).build()); - KafkaConnect connect = KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), 1) + KafkaConnect connect = KafkaConnectTemplates.kafkaConnect(testStorage.getNamespaceName(), testStorage.getClusterName(), suiteTestStorage.getClusterName(), 1) .editMetadata() .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true") .endMetadata() @@ -354,10 +351,7 @@ void testUpdateConnectWithAnotherPlugin() { echoSinkConfig.put("level", "INFO"); LOGGER.info("Creating EchoSink KafkaConnector"); - 
resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(TestConstants.ECHO_SINK_CONNECTOR_NAME, testStorage.getClusterName()) - .editMetadata() - .withNamespace(testStorage.getNamespaceName()) - .endMetadata() + resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), TestConstants.ECHO_SINK_CONNECTOR_NAME, testStorage.getClusterName()) .editOrNewSpec() .withClassName(TestConstants.ECHO_SINK_CLASS_NAME) .withConfig(echoSinkConfig) @@ -385,10 +379,7 @@ void testUpdateConnectWithAnotherPlugin() { camelHttpConfig.put("topics", topicName); LOGGER.info("Creating Camel-HTTP-Sink KafkaConnector"); - resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(camelConnector, testStorage.getClusterName()) - .editMetadata() - .withNamespace(testStorage.getNamespaceName()) - .endMetadata() + resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), camelConnector, testStorage.getClusterName()) .editOrNewSpec() .withClassName(CAMEL_CONNECTOR_HTTP_SINK_CLASS_NAME) .withConfig(camelHttpConfig) @@ -416,7 +407,7 @@ void testBuildOtherPluginTypeWithAndWithoutFileName() { resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), topicName, testStorage.getNamespaceName()).build()); - resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), 1) + resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getNamespaceName(), testStorage.getClusterName(), suiteTestStorage.getClusterName(), 1) .editMetadata() .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true") .endMetadata() @@ -473,8 +464,8 @@ void testBuildPluginUsingMavenCoordinatesArtifacts() { final String connectorName = testStorage.getClusterName() + "-camel-connector"; resourceManager.createResourceWithWait( - KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getNamespaceName()).build(), - KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), 1) + KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName()).build(), + KafkaConnectTemplates.kafkaConnect(testStorage.getNamespaceName(), testStorage.getClusterName(), suiteTestStorage.getClusterName(), 1) .editMetadata() .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true") .endMetadata() @@ -494,10 +485,7 @@ void testBuildPluginUsingMavenCoordinatesArtifacts() { connectorConfig.put("topics", testStorage.getTopicName()); connectorConfig.put("camel.source.path.timerName", "timer"); - resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(connectorName, testStorage.getClusterName()) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() + resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), connectorName, testStorage.getClusterName()) .editOrNewSpec() .withClassName(CAMEL_CONNECTOR_TIMER_CLASS_NAME) .withConfig(connectorConfig) @@ -535,10 +523,6 @@ void setup() { KafkaNodePoolTemplates.controllerPool(suiteTestStorage.getNamespaceName(), suiteTestStorage.getControllerPoolName(), suiteTestStorage.getClusterName(), 3).build() ) ); - 
resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(suiteTestStorage.getClusterName(), 3) - .editMetadata() - .withNamespace(Environment.TEST_SUITE_NAMESPACE) - .endMetadata() - .build()); + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(suiteTestStorage.getNamespaceName(), suiteTestStorage.getClusterName(), 3).build()); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java index 9143edb757..b835692637 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java @@ -132,8 +132,8 @@ void testDeployRollUndeploy() { KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build()); - resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), testStorage.getNamespaceName(), connectReplicasCount).build()); + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build()); + resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getNamespaceName(), testStorage.getClusterName(), connectReplicasCount).build()); // Test ManualRolling Update LOGGER.info("KafkaConnect manual rolling update"); @@ -186,9 +186,9 @@ void testKafkaConnectAndConnectorStateWithFileSinkPlugin() { KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build()); + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build()); resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage).build()); - resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 1) + resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 1) .editMetadata() .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true") .endMetadata() @@ -205,7 +205,7 @@ void testKafkaConnectAndConnectorStateWithFileSinkPlugin() { KafkaConnectUtils.waitUntilKafkaConnectRestApiIsAvailable(testStorage.getNamespaceName(), connectPodName); LOGGER.info("Creating KafkaConnector: {}/{} without 'spec.pause' or 'spec.state' specified", testStorage.getNamespaceName(), testStorage.getClusterName()); - resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getClusterName()) + resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), testStorage.getClusterName()) .editSpec() .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector") .addToConfig("topics", testStorage.getTopicName()) @@ -244,7 +244,7 @@ void testKafkaConnectWithPlainAndScramShaAuthentication() { ) ); // Use a Kafka with plain listener disabled - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3) 
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3) .editSpec() .editKafka() .withListeners(new GenericKafkaListenerBuilder() @@ -258,12 +258,12 @@ void testKafkaConnectWithPlainAndScramShaAuthentication() { .endSpec() .build()); - KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), testStorage.getClusterName(), testStorage.getUsername()).build(); + KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), testStorage.getUsername(), testStorage.getClusterName()).build(); resourceManager.createResourceWithWait(kafkaUser); resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage).build()); - KafkaConnect connect = KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 1) + KafkaConnect connect = KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 1) .editSpec() .withBootstrapServers(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) .withNewKafkaClientAuthenticationScramSha512() @@ -318,9 +318,9 @@ void testKafkaConnectAndConnectorFileSinkPlugin() { KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build()); + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build()); - KafkaConnect connect = KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 1) + KafkaConnect connect = KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 1) .editMetadata() .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true") .endMetadata() @@ -337,7 +337,7 @@ void testKafkaConnectAndConnectorFileSinkPlugin() { LOGGER.info("Deploying NetworkPolicies for KafkaConnect"); NetworkPolicyResource.deployNetworkPolicyForResource(connect, KafkaConnectResources.componentName(testStorage.getClusterName())); - resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(connectorName, testStorage.getClusterName(), 2) + resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), connectorName, testStorage.getClusterName(), 2) .editSpec() .addToConfig("topic", testStorage.getTopicName()) .endSpec() @@ -368,12 +368,12 @@ void testJvmAndResources() { KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build()); + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build()); Map jvmOptionsXX = new HashMap<>(); jvmOptionsXX.put("UseG1GC", "true"); - resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), testStorage.getNamespaceName(), 1) + resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getNamespaceName(), testStorage.getClusterName(), 1) .editSpec() .withResources(new 
ResourceRequirementsBuilder() .addToLimits("memory", new Quantity("400M")) @@ -407,9 +407,9 @@ void testKafkaConnectScaleUpScaleDown() { KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build()); + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build()); - resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 1).build()); + resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 1).build()); // kafka cluster Connect already deployed List connectPods = kubeClient(testStorage.getNamespaceName()).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.componentName(testStorage.getClusterName())); @@ -443,7 +443,7 @@ void testSecretsWithKafkaConnectWithTlsAndTlsClientAuthentication() { KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3) + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3) .editSpec() .editKafka() .withListeners(new GenericKafkaListenerBuilder() @@ -462,7 +462,7 @@ void testSecretsWithKafkaConnectWithTlsAndTlsClientAuthentication() { resourceManager.createResourceWithWait(kafkaUser); resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage).build()); - KafkaConnect connect = KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 1) + KafkaConnect connect = KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 1) .editSpec() .addToConfig("key.converter.schemas.enable", false) .addToConfig("value.converter.schemas.enable", false) @@ -517,7 +517,7 @@ void testSecretsWithKafkaConnectWithTlsAndScramShaAuthentication() { KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3) + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3) .editSpec() .editKafka() .withListeners(new GenericKafkaListenerBuilder() @@ -536,7 +536,7 @@ void testSecretsWithKafkaConnectWithTlsAndScramShaAuthentication() { resourceManager.createResourceWithWait(kafkaUser); resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage).build()); - KafkaConnect connect = KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 1) + KafkaConnect connect = KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 1) .editSpec() .addToConfig("key.converter.schemas.enable", false) .addToConfig("value.converter.schemas.enable", false) @@ -589,11 +589,11 @@ void testConnectorTaskAutoRestart() { 
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build()); + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build()); final String imageFullPath = Environment.getImageOutputRegistry(testStorage.getNamespaceName(), TestConstants.ST_CONNECT_BUILD_IMAGE_NAME, String.valueOf(new Random().nextInt(Integer.MAX_VALUE))); - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), 3, 3, testStorage.getNamespaceName()).build()); + resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName(), 3, 3).build()); final Plugin echoSinkPlugin = new PluginBuilder() .withName(TestConstants.ECHO_SINK_CONNECTOR_NAME) @@ -604,7 +604,7 @@ void testConnectorTaskAutoRestart() { .build()) .build(); - KafkaConnect connect = KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), testStorage.getNamespaceName(), testStorage.getClusterName(), 1) + KafkaConnect connect = KafkaConnectTemplates.kafkaConnect(testStorage.getNamespaceName(), testStorage.getClusterName(), 1) .editMetadata() .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true") .endMetadata() @@ -638,7 +638,7 @@ void testConnectorTaskAutoRestart() { echoSinkConfig.put("fail.task.after.records", failMessageCount); LOGGER.info("Creating EchoSink KafkaConnector"); - resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(TestConstants.ECHO_SINK_CONNECTOR_NAME, testStorage.getClusterName()) + resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), TestConstants.ECHO_SINK_CONNECTOR_NAME, testStorage.getClusterName()) .editOrNewSpec() .withTasksMax(1) .withClassName(TestConstants.ECHO_SINK_CLASS_NAME) @@ -706,8 +706,8 @@ void testCustomAndUpdatedValues() { KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3, 1).build()); - resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 1) + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 1).build()); + resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 1) .editSpec() .withNewTemplate() .withNewConnectContainer() @@ -785,9 +785,9 @@ void testMultiNodeKafkaConnectWithConnectorCreation() { KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build()); + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build()); // Crate connect cluster with default connect image - 
resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 3) + resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 3) .editMetadata() .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true") .endMetadata() @@ -799,7 +799,7 @@ void testMultiNodeKafkaConnectWithConnectorCreation() { .endSpec() .build()); - resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getClusterName()) + resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), testStorage.getClusterName()) .editSpec() .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector") .addToConfig("topics", testStorage.getTopicName()) @@ -840,7 +840,7 @@ void testConnectTlsAuthWithWeirdUserName() { KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3) + resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3) .editSpec() .editKafka() .withListeners(new GenericKafkaListenerBuilder() @@ -854,9 +854,9 @@ void testConnectTlsAuthWithWeirdUserName() { .endSpec() .build()); - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), testStorage.getNamespaceName()).build()); - resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getClusterName(), weirdUserName).build()); - resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 1) + resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName()).build()); + resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), weirdUserName, testStorage.getClusterName()).build()); + resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 1) .editMetadata() .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true") .endMetadata() @@ -881,7 +881,7 @@ void testConnectTlsAuthWithWeirdUserName() { .withBootstrapServers(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) .endSpec() .build()); - resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getClusterName()) + resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), testStorage.getClusterName()) .editSpec() .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector") .addToConfig("topics", testStorage.getTopicName()) @@ -916,7 +916,7 @@ void testConnectScramShaAuthWithWeirdUserName() { KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3) + 
resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -930,9 +930,9 @@ void testConnectScramShaAuthWithWeirdUserName() {
.endSpec()
.build());
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), testStorage.getNamespaceName()).build());
- resourceManager.createResourceWithWait(KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), testStorage.getClusterName(), weirdUserName).build());
- resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 1)
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName()).build());
+ resourceManager.createResourceWithWait(KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), weirdUserName, testStorage.getClusterName()).build());
+ resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 1)
.editMetadata()
.addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
.endMetadata()
@@ -957,7 +957,7 @@ void testConnectScramShaAuthWithWeirdUserName() {
.endTls()
.endSpec()
.build());
- resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getClusterName())
+ resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), testStorage.getClusterName())
.editSpec()
.withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
.addToConfig("topics", testStorage.getTopicName())
@@ -989,8 +989,8 @@ void testScaleConnectWithoutConnectorToZero() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build());
- resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 2).build());
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build());
+ resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 2).build());
List connectPods = kubeClient(testStorage.getNamespaceName()).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.componentName(testStorage.getClusterName()));
@@ -1021,14 +1021,14 @@ void testScaleConnectWithConnectorToZero() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build());
- resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 2)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build());
+ resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 2)
.editMetadata()
.addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
.endMetadata()
.build());
- resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getClusterName())
+ resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), testStorage.getClusterName())
.editSpec()
.withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
.addToConfig("file", TestConstants.DEFAULT_SINK_FILE_PATH)
@@ -1071,14 +1071,14 @@ void testScaleConnectAndConnectorSubresource() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build());
- resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 1)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build());
+ resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 1)
.editMetadata()
.addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
.endMetadata()
.build());
- resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getClusterName())
+ resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), testStorage.getClusterName())
.editSpec()
.withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
.addToConfig("file", TestConstants.DEFAULT_SINK_FILE_PATH)
@@ -1090,7 +1090,6 @@ void testScaleConnectAndConnectorSubresource() {
final int scaleTo = 4;
final long connectObsGen = KafkaConnectResource.kafkaConnectClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus().getObservedGeneration();
- final String connectGenName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(KafkaConnectResources.componentName(testStorage.getClusterName())).get(0).getMetadata().getGenerateName();
LOGGER.info("-------> Scaling KafkaConnect subresource <-------");
LOGGER.info("Scaling subresource replicas to {}", scaleTo);
@@ -1201,8 +1200,8 @@ void testMountingSecretAndConfigMapAsVolumesAndEnvVars() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build());
- resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), testStorage.getNamespaceName(), 1)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build());
+ resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getNamespaceName(), testStorage.getClusterName(), 1)
.editMetadata()
.addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
.endMetadata()
@@ -1311,7 +1310,7 @@ void testKafkaConnectWithScramShaAuthenticationRolledAfterPasswordChanged() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -1335,7 +1334,7 @@ void testKafkaConnectWithScramShaAuthenticationRolledAfterPasswordChanged() {
kubeClient(testStorage.getNamespaceName()).createSecret(passwordSecret);
- KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), testStorage.getClusterName(), testStorage.getKafkaUsername())
+ KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), testStorage.getKafkaUsername(), testStorage.getClusterName())
.editSpec()
.withNewKafkaUserScramSha512ClientAuthentication()
.withNewPassword()
@@ -1349,8 +1348,8 @@ void testKafkaConnectWithScramShaAuthenticationRolledAfterPasswordChanged() {
resourceManager.createResourceWithWait(kafkaUser);
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), testStorage.getNamespaceName()).build());
- resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), testStorage.getNamespaceName(), 1)
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName()).build());
+ resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getNamespaceName(), testStorage.getClusterName(), 1)
.withNewSpec()
.withBootstrapServers(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
.withNewKafkaClientAuthenticationScramSha512()
@@ -1384,7 +1383,7 @@ void testKafkaConnectWithScramShaAuthenticationRolledAfterPasswordChanged() {
kubeClient(testStorage.getNamespaceName()).createSecret(newPasswordSecret);
- kafkaUser = KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), testStorage.getClusterName(), testStorage.getKafkaUsername())
+ kafkaUser = KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), testStorage.getKafkaUsername(), testStorage.getClusterName())
.editSpec()
.withNewKafkaUserScramSha512ClientAuthentication()
.withNewPassword()
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlApiST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlApiST.java
index ff2bf8a84c..4800dfdd47 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlApiST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlApiST.java
@@ -51,7 +51,7 @@ void testCruiseControlBasicAPIRequestsWithSecurityDisabled() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 3, 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 3)
.editOrNewSpec()
.withNewCruiseControl()
.withConfig(config)
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java
index 8af577971a..2a3d739f67 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java
@@ -68,7 +68,7 @@ void testDeployAndUnDeployCruiseControl() throws IOException {
)
);
resourceManager.createResourceWithWait(
- KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), defaultBrokerReplicaCount, defaultBrokerReplicaCount)
+ KafkaTemplates.kafkaWithCruiseControl(testStorage.getNamespaceName(), testStorage.getClusterName(), defaultBrokerReplicaCount, defaultBrokerReplicaCount)
.editSpec()
.editOrNewKafka()
.addToConfig(Map.of("default.replication.factor", defaultBrokerReplicaCount))
@@ -131,7 +131,7 @@ void testConfigurationUpdate() throws IOException {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 3, 3).build());
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 3).build());
Map kafkaSnapShot = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getBrokerSelector());
Map cruiseControlSnapShot = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), CruiseControlResources.componentName(testStorage.getClusterName()));
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java
index a2679e04ee..5ce6fe7031 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java
@@ -90,10 +90,7 @@ void testAutoCreationOfCruiseControlTopicsWithResources() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), defaultBrokerReplicaCount).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), defaultBrokerReplicaCount, defaultBrokerReplicaCount)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), defaultBrokerReplicaCount, defaultBrokerReplicaCount)
.editOrNewSpec()
.editKafka()
.addToConfig(Map.of("default.replication.factor", defaultBrokerReplicaCount))
@@ -142,10 +139,7 @@ void testCruiseControlWithApiSecurityDisabled() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 3, 3)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), 3, 3)
.editOrNewSpec()
.editCruiseControl()
.addToConfig("webserver.security.enable", "false")
@@ -153,11 +147,7 @@ void testCruiseControlWithApiSecurityDisabled() {
.endCruiseControl()
.endSpec()
.build());
- resourceManager.createResourceWithWait(KafkaRebalanceTemplates.kafkaRebalance(testStorage.getClusterName())
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
- .build());
+ resourceManager.createResourceWithWait(KafkaRebalanceTemplates.kafkaRebalance(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName()).build());
KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), KafkaRebalanceState.ProposalReady);
}
@@ -174,16 +164,8 @@ void testCruiseControlWithRebalanceResourceAndRefreshAnnotation() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3, 3)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
- .build());
- resourceManager.createResourceWithoutWait(KafkaRebalanceTemplates.kafkaRebalance(testStorage.getClusterName())
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
- .build());
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), 3, 3).build());
+ resourceManager.createResourceWithoutWait(KafkaRebalanceTemplates.kafkaRebalance(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName()).build());
KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), KafkaRebalanceState.NotReady);
@@ -192,7 +174,7 @@ void testCruiseControlWithRebalanceResourceAndRefreshAnnotation() {
// CruiseControl spec is now enabled
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), kafka -> {
// Get default CC spec with tune options and set it to existing Kafka
- Kafka kafkaUpdated = KafkaTemplates.kafkaWithCruiseControlTunedForFastModelGeneration(testStorage.getClusterName(), 3, 3).build();
+ Kafka kafkaUpdated = KafkaTemplates.kafkaWithCruiseControlTunedForFastModelGeneration(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), 3, 3).build();
kafka.getSpec().setCruiseControl(kafkaUpdated.getSpec().getCruiseControl());
kafka.getSpec().setKafka(kafkaUpdated.getSpec().getKafka());
}, Environment.TEST_SUITE_NAMESPACE);
@@ -219,13 +201,9 @@ void testCruiseControlChangesFromRebalancingtoProposalReadyWhenSpecUpdated() {
KafkaNodePoolTemplates.controllerPool(clusterOperator.getDeploymentNamespace(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 3, 1).build());
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(clusterOperator.getDeploymentNamespace(), testStorage.getClusterName(), 3, 1).build());
- resourceManager.createResourceWithWait(KafkaRebalanceTemplates.kafkaRebalance(testStorage.getClusterName())
- .editMetadata()
- .withNamespace(clusterOperator.getDeploymentNamespace())
- .endMetadata()
- .build());
+ resourceManager.createResourceWithWait(KafkaRebalanceTemplates.kafkaRebalance(clusterOperator.getDeploymentNamespace(), testStorage.getClusterName()).build());
KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(clusterOperator.getDeploymentNamespace(), testStorage.getClusterName(), KafkaRebalanceState.ProposalReady);
@@ -254,7 +232,7 @@ void testCruiseControlWithSingleNodeKafka() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 1, 1).build());
+ resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1).build());
KafkaUtils.waitUntilKafkaStatusConditionContainsMessage(testStorage.getClusterName(), testStorage.getNamespaceName(), errMessage, Duration.ofMinutes(6).toMillis());
@@ -292,12 +270,12 @@ void testCruiseControlTopicExclusion() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 3, 1).build());
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), excludedTopic1, testStorage.getNamespaceName()).build());
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), excludedTopic2, testStorage.getNamespaceName()).build());
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), includedTopic, testStorage.getNamespaceName()).build());
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 1).build());
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), excludedTopic1, testStorage.getClusterName()).build());
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), excludedTopic2, testStorage.getClusterName()).build());
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), includedTopic, testStorage.getClusterName()).build());
- resourceManager.createResourceWithWait(KafkaRebalanceTemplates.kafkaRebalance(testStorage.getClusterName())
+ resourceManager.createResourceWithWait(KafkaRebalanceTemplates.kafkaRebalance(testStorage.getNamespaceName(), testStorage.getClusterName())
.editOrNewSpec()
.withExcludedTopics("excluded-.*")
.endSpec()
@@ -330,7 +308,7 @@ void testCruiseControlReplicaMovementStrategy() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 3, 3).build());
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 3).build());
String ccPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), CruiseControlResources.componentName(testStorage.getClusterName())).get(0).getMetadata().getName();
@@ -382,23 +360,17 @@ void testCruiseControlIntraBrokerBalancing() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 3, 3)
- .editMetadata()
- .withNamespace(testStorage.getNamespaceName())
- .endMetadata()
- .editOrNewSpec()
- .editKafka()
- .withStorage(jbodStorage)
- .endKafka()
- .endSpec()
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 3)
+ .editOrNewSpec()
+ .editKafka()
+ .withStorage(jbodStorage)
+ .endKafka()
+ .endSpec()
.build());
- resourceManager.createResourceWithWait(KafkaRebalanceTemplates.kafkaRebalance(testStorage.getClusterName())
- .editMetadata()
- .withNamespace(testStorage.getNamespaceName())
- .endMetadata()
- .editOrNewSpec()
- .withRebalanceDisk(true)
- .endSpec()
+ resourceManager.createResourceWithWait(KafkaRebalanceTemplates.kafkaRebalance(testStorage.getNamespaceName(), testStorage.getClusterName())
+ .editOrNewSpec()
+ .withRebalanceDisk(true)
+ .endSpec()
.build());
KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(testStorage.getNamespaceName(), testStorage.getClusterName(), KafkaRebalanceState.ProposalReady);
@@ -429,8 +401,8 @@ void testCruiseControlDuringBrokerScaleUpAndDown() {
);
resourceManager.createResourceWithWait(
- KafkaTemplates.kafkaWithCruiseControlTunedForFastModelGeneration(testStorage.getClusterName(), initialReplicas, initialReplicas).build(),
- KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), 10, 3, testStorage.getNamespaceName()).build(),
+ KafkaTemplates.kafkaWithCruiseControlTunedForFastModelGeneration(testStorage.getNamespaceName(), testStorage.getClusterName(), initialReplicas, initialReplicas).build(),
+ KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName(), 10, 3).build(),
ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build()
);
@@ -450,7 +422,7 @@ void testCruiseControlDuringBrokerScaleUpAndDown() {
// when using add_brokers mode, we can hit `ProposalReady` right after KR creation - that's why `waitReady` is set to `false` here
resourceManager.createResourceWithoutWait(
- KafkaRebalanceTemplates.kafkaRebalance(testStorage.getClusterName())
+ KafkaRebalanceTemplates.kafkaRebalance(testStorage.getNamespaceName(), testStorage.getClusterName())
.editOrNewSpec()
.withMode(KafkaRebalanceMode.ADD_BROKERS)
.withBrokers(3, 4)
@@ -470,7 +442,7 @@ void testCruiseControlDuringBrokerScaleUpAndDown() {
// when using remove_brokers mode, we can hit `ProposalReady` right after KR creation - that's why `waitReady` is set to `false` here
resourceManager.createResourceWithoutWait(
- KafkaRebalanceTemplates.kafkaRebalance(testStorage.getClusterName())
+ KafkaRebalanceTemplates.kafkaRebalance(testStorage.getNamespaceName(), testStorage.getClusterName())
.editOrNewSpec()
.withMode(KafkaRebalanceMode.REMOVE_BROKERS)
.withBrokers(3, 4)
@@ -507,10 +479,10 @@ void testKafkaRebalanceAutoApprovalMechanism() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getClusterName(), 3, 3).build());
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaWithCruiseControl(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 3).build());
// KafkaRebalance with auto-approval
- resourceManager.createResourceWithWait(KafkaRebalanceTemplates.kafkaRebalance(testStorage.getClusterName())
+ resourceManager.createResourceWithWait(KafkaRebalanceTemplates.kafkaRebalance(testStorage.getNamespaceName(), testStorage.getClusterName())
.editMetadata()
.addToAnnotations(Annotations.ANNO_STRIMZI_IO_REBALANCE_AUTOAPPROVAL, "true")
.endMetadata()
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java
index 7044da1306..d5b496f33b 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java
@@ -53,7 +53,7 @@ void testConnectWithConnectorUsingConfigAndEnvProvider() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build());
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build());
Map configData = new HashMap<>();
configData.put("topics", testStorage.getTopicName());
@@ -73,7 +73,7 @@ void testConnectWithConnectorUsingConfigAndEnvProvider() {
kubeClient().getClient().configMaps().inNamespace(testStorage.getNamespaceName()).resource(connectorConfig).create();
- resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getClusterName(), testStorage.getNamespaceName(), 1)
+ resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 1)
.editOrNewMetadata()
.addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
.endMetadata()
@@ -137,7 +137,7 @@ void testConnectWithConnectorUsingConfigAndEnvProvider() {
String configPrefix = "configmaps:" + testStorage.getNamespaceName() + "/connector-config:";
- resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getClusterName())
+ resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), testStorage.getClusterName())
.editSpec()
.withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
.addToConfig("file", "${env:FILE_SINK_FILE}")
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java
index 30d59d4a01..37ec3b6d26 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java
@@ -93,7 +93,7 @@ void testKafkaNodePoolBrokerIdsManagementUsingAnnotations() {
.withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_NEXT_NODE_IDS, "[91-93]"))
.endMetadata()
.build(),
- KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 1, 1).build()
+ KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1).build()
);
PodUtils.waitUntilPodStabilityReplicasCount(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(testStorage.getClusterName(), nodePoolNameInitial), 2);
@@ -203,11 +203,11 @@ void testNodePoolsRolesChanging() {
resourceManager.createResourceWithWait(
KafkaNodePoolTemplates.mixedPoolPersistentStorage(testStorage.getNamespaceName(), volatileRolePoolName, testStorage.getClusterName(), 3).build(),
- KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 1, 1).build()
+ KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1).build()
);
LOGGER.info("Create KafkaTopic {}/{} with 6 replicas, spawning across all brokers", testStorage.getNamespaceName(), testStorage.getTopicName());
- final KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), 1, 6, testStorage.getNamespaceName()).build();
+ final KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName(), 1, 6).build();
resourceManager.createResourceWithWait(kafkaTopic);
LOGGER.info("wait for Kafka pods stability");
@@ -285,7 +285,7 @@ void testNodePoolsAdditionAndRemoval() {
resourceManager.createResourceWithWait(
KafkaNodePoolTemplates.brokerPoolPersistentStorage(testStorage.getNamespaceName(), poolAName, testStorage.getClusterName(), 1).build(),
KafkaNodePoolTemplates.brokerPoolPersistentStorage(testStorage.getNamespaceName(), poolB1Name, testStorage.getClusterName(), brokerNodePoolReplicaCount).build(),
- KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 1, 3)
+ KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 3)
.editOrNewSpec()
.editKafka()
.addToConfig("auto.create.topics.enable", "false") // topics replica count helps ensure there are enough brokers
@@ -347,11 +347,7 @@ void testKafkaManagementTransferToAndFromKafkaNodePool() {
LOGGER.info("Deploying Kafka Cluster: {}/{} controlled by KafkaNodePool: {}", testStorage.getNamespaceName(), testStorage.getClusterName(), kafkaNodePoolName);
- final Kafka kafkaCr = KafkaTemplates.kafkaPersistentNodePools(testStorage.getClusterName(), originalKafkaReplicaCount, 3)
- .editOrNewMetadata()
- .withNamespace(testStorage.getNamespaceName())
- .endMetadata()
- .build();
+ final Kafka kafkaCr = KafkaTemplates.kafkaPersistentNodePools(testStorage.getNamespaceName(), testStorage.getClusterName(), originalKafkaReplicaCount, 3).build();
// as the only FG set in the CO is 'KafkaNodePools' (kraft is never included) Broker role is the only one that can be taken
resourceManager.createResourceWithWait(
@@ -444,7 +440,7 @@ void testKafkaManagementTransferToAndFromKafkaNodePool() {
private void transmitMessagesWithNewTopicAndClean(TestStorage testStorage, int topicReplicas) {
final String topicName = testStorage.getTopicName() + "-replicas-" + topicReplicas + "-" + hashStub(String.valueOf(new Random().nextInt(Integer.MAX_VALUE)));
- final KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(testStorage.getClusterName(), topicName, 1, topicReplicas, testStorage.getNamespaceName()).build();
+ final KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(testStorage.getNamespaceName(), topicName, testStorage.getClusterName(), 1, topicReplicas).build();
resourceManager.createResourceWithWait(kafkaTopic);
LOGGER.info("Transmit messages with Kafka {}/{} using topic {}", testStorage.getNamespaceName(), testStorage.getClusterName(), topicName);
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java
index 37e3764ac5..797038dede 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java
@@ -184,7 +184,7 @@ void testJvmAndResources() {
)
);
- Kafka kafka = KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 1, 1)
+ Kafka kafka = KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1)
.editSpec()
.editKafka()
.withResources(brokersResReq)
@@ -335,7 +335,7 @@ void testRemoveComponentsFromEntityOperator() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build());
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build());
Map eoSnapshot = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaResources.entityOperatorDeploymentName(testStorage.getClusterName()));
@@ -440,7 +440,7 @@ void testKafkaJBODDeleteClaimsTrueFalse() {
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), kafkaReplicas)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), kafkaReplicas)
.editSpec()
.editKafka()
.withStorage(jbodStorage)
@@ -505,7 +505,7 @@ void testRegenerateCertExternalAddressChange() {
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 1).build());
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 1).build());
final String brokerSecret = testStorage.getClusterName() + "-kafka-brokers";
@@ -637,7 +637,7 @@ void testLabelsExistenceAndManipulation() {
)
);
- final KafkaBuilder kafkaBuilder = KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 1)
+ final KafkaBuilder kafkaBuilder = KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 1)
.editMetadata()
.withLabels(customSpecifiedLabels)
.endMetadata()
@@ -912,7 +912,7 @@ void testMessagesAndConsumerOffsetFilesOnDisk() {
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 1, 1)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1)
.editSpec()
.editKafka()
.withConfig(kafkaConfig)
@@ -922,7 +922,7 @@ void testMessagesAndConsumerOffsetFilesOnDisk() {
Map brokerPodsSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getBrokerSelector());
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), 1, 1, testStorage.getNamespaceName()).build());
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName(), 1, 1).build());
String brokerPodName = kubeClient().listPods(testStorage.getNamespaceName(), testStorage.getBrokerSelector()).get(0).getMetadata().getName();
@@ -998,7 +998,7 @@ void testMessagesAndConsumerOffsetFilesOnDisk() {
void testReadOnlyRootFileSystem() {
final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
- Kafka kafka = KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3)
+ Kafka kafka = KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 3)
.editSpec()
.editKafka()
.withNewTemplate()
@@ -1088,7 +1088,7 @@ void testDeployUnsupportedKafka() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 1, 1)
+ resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1)
.editSpec()
.editKafka()
.withVersion(nonExistingVersion)
@@ -1152,7 +1152,7 @@ void testResizeJbodVolumes() {
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), numberOfKafkaReplicas, 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), numberOfKafkaReplicas, 3)
.editSpec()
.editKafka()
.withStorage(
@@ -1171,7 +1171,7 @@ void testResizeJbodVolumes() {
// ##############################
// Setup topic, which has 3 replicas and 2 min.isr
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getContinuousTopicName(), 3, 3, 2, testStorage.getNamespaceName()).build());
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getContinuousTopicName(), testStorage.getClusterName(), 3, 3, 2).build());
String producerAdditionConfiguration = "delivery.timeout.ms=40000\nrequest.timeout.ms=5000";
// Add transactional id to make producer transactional
@@ -1261,7 +1261,7 @@ void testKRaftMode() {
resourceManager.createResourceWithWait(
KafkaNodePoolTemplates.brokerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getBrokerPoolName(), testStorage.getClusterName(), kafkaReplicas).build(),
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), kafkaReplicas).build(),
- KafkaTemplates.kafkaPersistentKRaft(testStorage.getClusterName(), kafkaReplicas).build()
+ KafkaTemplates.kafkaPersistentKRaft(testStorage.getNamespaceName(), testStorage.getClusterName(), kafkaReplicas).build()
);
// Check that there is no ZooKeeper
@@ -1270,7 +1270,7 @@ void testKRaftMode() {
assertThat("No ZooKeeper Pods should exist", zkPods.size(), is(0));
// create KafkaTopic with replication factor on all brokers and min.insync replicas configuration to not loss data during Rolling Update.
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getContinuousTopicName(), 1, kafkaReplicas, kafkaReplicas - 1, testStorage.getNamespaceName()).build());
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getContinuousTopicName(), testStorage.getClusterName(), 1, kafkaReplicas, kafkaReplicas - 1).build());
KafkaClients clients = ClientUtils.getContinuousPlainClientBuilder(testStorage).build();
LOGGER.info("Producing and Consuming messages with continuous clients: {}, {} in Namespace {}", testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), testStorage.getNamespaceName());
@@ -1368,7 +1368,7 @@ void testAdditionalVolumes() {
.build()
};
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), numberOfKafkaReplicas, 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), numberOfKafkaReplicas, 3)
.editSpec()
.editKafka()
.editTemplate()
@@ -1383,7 +1383,7 @@ void testAdditionalVolumes() {
.endSpec()
.build());
- resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), testStorage.getNamespaceName(), 1)
+ resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getNamespaceName(), testStorage.getClusterName(), 1)
.editSpec()
.editTemplate()
.editPod()
@@ -1396,7 +1396,7 @@ void testAdditionalVolumes() {
.endSpec()
.build());
- resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(testStorage.getClusterName(),
+ resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(testStorage.getNamespaceName(), testStorage.getClusterName(),
KafkaResources.plainBootstrapAddress(testStorage.getClusterName()), 1)
.editSpec()
.editTemplate()
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java
index dba0324394..0563d5c41b 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java
@@ -62,10 +62,7 @@ void testKafkaWithVersion(final TestKafkaVersion testKafkaVersion) {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
- .editMetadata()
- .withNamespace(testStorage.getNamespaceName())
- .endMetadata()
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editOrNewSpec()
.editOrNewKafka()
.withVersion(testKafkaVersion.version())
@@ -97,7 +94,7 @@ void testKafkaWithVersion(final TestKafkaVersion testKafkaVersion) {
.build()
);
- KafkaUser writeUser = KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), testStorage.getClusterName(), kafkaUserWrite)
+ KafkaUser writeUser = KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), kafkaUserWrite, testStorage.getClusterName())
.editSpec()
.withNewKafkaUserAuthorizationSimple()
.addNewAcl()
@@ -111,7 +108,7 @@ void testKafkaWithVersion(final TestKafkaVersion testKafkaVersion) {
.endSpec()
.build();
- KafkaUser readUser = KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), testStorage.getClusterName(), kafkaUserRead)
+ KafkaUser readUser = KafkaUserTemplates.scramShaUser(testStorage.getNamespaceName(), kafkaUserRead, testStorage.getClusterName())
.editSpec()
.withNewKafkaUserAuthorizationSimple()
.addNewAcl()
@@ -130,7 +127,7 @@ void testKafkaWithVersion(final TestKafkaVersion testKafkaVersion) {
.endSpec()
.build();
- KafkaUser tlsReadWriteUser = KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getClusterName(), kafkaUserReadWriteTls)
+ KafkaUser tlsReadWriteUser = KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), kafkaUserReadWriteTls, testStorage.getClusterName())
.editSpec()
.withNewKafkaUserAuthorizationSimple()
.addNewAcl()
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java
index 12b3c8831f..b670ab831e 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java
@@ -68,7 +68,7 @@ void testKafkaQuotasPluginIntegration() {
)
);
resourceManager.createResourceWithWait(
- KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 1, 1)
+ KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1)
.editSpec()
.editKafka()
.addToConfig("client.quota.callback.static.storage.check-interval", "5")
@@ -99,7 +99,7 @@ void testKafkaQuotasPluginIntegration() {
.build()
);
resourceManager.createResourceWithWait(
- KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), testStorage.getNamespaceName()).build(),
+ KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName()).build(),
KafkaUserTemplates.scramShaUser(testStorage).build()
);
@@ -144,7 +144,7 @@ void testKafkaQuotasPluginWithBandwidthLimitation() {
)
);
resourceManager.createResourceWithWait(
- KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 1, 1)
+ KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1)
.editSpec()
.editKafka()
.addToConfig("client.quota.callback.static.storage.check-interval", "5")
@@ -174,7 +174,7 @@ void testKafkaQuotasPluginWithBandwidthLimitation() {
.build()
);
resourceManager.createResourceWithWait(
- KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), testStorage.getNamespaceName()).build(),
+ KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName()).build(),
KafkaUserTemplates.scramShaUser(testStorage).build()
);
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java
index 9cb3c15af9..77f32c4151 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java
@@ -99,10 +99,7 @@ void testTieredStorageWithAivenPlugin() {
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3)
- .editMetadata()
- .withNamespace(suiteStorage.getNamespaceName())
- .endMetadata()
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(suiteStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withImage(Environment.getImageOutputRegistry(suiteStorage.getNamespaceName(), IMAGE_NAME, BUILT_IMAGE_TAG))
@@ -126,7 +123,7 @@ void testTieredStorageWithAivenPlugin() {
.endSpec()
.build());
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), suiteStorage.getNamespaceName())
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(suiteStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName())
.editSpec()
.addToConfig("file.delete.delay.ms", 1000)
.addToConfig("local.retention.ms", 1000)
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java
index 681aeedb96..6c43930374 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java
@@ -81,10 +81,7 @@ void testSimpleDynamicConfiguration() {
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), KAFKA_REPLICAS, 1)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), KAFKA_REPLICAS, 1)
.editSpec()
.editKafka()
.withConfig(deepCopyOfShardKafkaConfig)
@@ -142,10 +139,7 @@ void testUpdateToExternalListenerCausesRollingRestart() {
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), KAFKA_REPLICAS, 1)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), KAFKA_REPLICAS, 1)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -294,10 +288,7 @@ void testUpdateToExternalListenerCausesRollingRestartUsingExternalClients() {
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), KAFKA_REPLICAS, 1)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), KAFKA_REPLICAS, 1)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -313,8 +304,8 @@ void testUpdateToExternalListenerCausesRollingRestartUsingExternalClients() {
Map brokerPods = PodUtils.podSnapshot(Environment.TEST_SUITE_NAMESPACE, testStorage.getBrokerSelector());
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), Environment.TEST_SUITE_NAMESPACE).build());
- resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), testStorage.getKafkaUsername()).build());
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), testStorage.getClusterName()).build());
+ resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(Environment.TEST_SUITE_NAMESPACE, testStorage.getKafkaUsername(), testStorage.getClusterName()).build());
ExternalKafkaClient externalKafkaClientTls = new ExternalKafkaClient.Builder()
.withTopicName(testStorage.getTopicName())
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java
index 63bf475b5e..5bc1c5d3af 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java
@@ -237,11 +237,8 @@ void setup() {
KafkaNodePoolTemplates.controllerPoolPersistentStorage(suiteTestStorage.getNamespaceName(), suiteTestStorage.getControllerPoolName(), suiteTestStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(suiteTestStorage.getClusterName(), 3)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
- .build(),
+ resourceManager.createResourceWithWait(
+ KafkaTemplates.kafkaPersistent(suiteTestStorage.getNamespaceName(), suiteTestStorage.getClusterName(), 3).build(),
ScraperTemplates.scraperPod(Environment.TEST_SUITE_NAMESPACE, suiteTestStorage.getScraperName()).build()
);
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java
index ba59084f96..ff3c8f31de 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java
@@ -134,7 +134,7 @@ void testSendMessagesPlainAnonymous() {
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3).build());
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 3).build());
resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage).build());
LOGGER.info("Transmitting messages over plain transport and without auth.Bootstrap address: {}", KafkaResources.plainBootstrapAddress(testStorage.getClusterName()));
@@ -163,7 +163,7 @@ void testSendMessagesTlsAuthenticated() {
)
);
// Use a Kafka with plain listener disabled
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(
@@ -228,7 +228,7 @@ void testSendMessagesPlainScramSha() {
)
);
// Use a Kafka with plain listener disabled
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -293,7 +293,7 @@ void testSendMessagesTlsScramSha() {
)
);
// Use a Kafka with plain listener disabled
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -361,7 +361,7 @@ void testSendMessagesCustomListenerTlsScramSha() {
)
);
// Use a Kafka with plain listener disabled
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -410,7 +410,7 @@ void testNodePort() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3, 1)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 1)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -514,7 +514,7 @@ void testOverrideNodePortConfiguration() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3, 1)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 1)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -577,7 +577,7 @@ void testNodePortTls() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3, 1)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 1)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -592,8 +592,8 @@ void testNodePortTls() {
.endSpec()
.build());
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), testStorage.getNamespaceName()).build());
- resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getClusterName(), testStorage.getKafkaUsername()).build());
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName()).build());
+ resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getKafkaUsername(), testStorage.getClusterName()).build());
ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
.withTopicName(testStorage.getTopicName())
@@ -623,7 +623,7 @@ void testLoadBalancer() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -670,7 +670,7 @@ void testLoadBalancerTls() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -688,7 +688,7 @@ void testLoadBalancerTls() {
.endSpec()
.build());
- resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getClusterName(), testStorage.getKafkaUsername()).build());
+ resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getKafkaUsername(), testStorage.getClusterName()).build());
ServiceUtils.waitUntilAddressIsReachable(KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus().getListeners().get(0).getAddresses().get(0).getHost());
@@ -719,7 +719,7 @@ void testClusterIp() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -750,7 +750,7 @@ void testClusterIpTls() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -802,7 +802,7 @@ void testCustomSoloCertificatesForNodePort() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3, 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -894,7 +894,7 @@ void testCustomChainCertificatesForNodePort() {
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 1, 1)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -978,7 +978,7 @@ void testCustomSoloCertificatesForLoadBalancer() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -1065,7 +1065,7 @@ void testCustomChainCertificatesForLoadBalancer() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -1161,7 +1161,7 @@ void testCustomSoloCertificatesForRoute() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -1255,7 +1255,7 @@ void testCustomChainCertificatesForRoute() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -1342,7 +1342,7 @@ void testCustomCertLoadBalancerAndTlsRollingUpdate() {
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -1602,7 +1602,7 @@ void testCustomCertNodePortAndTlsRollingUpdate() {
KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -1856,7 +1856,7 @@ void testCustomCertRouteAndTlsRollingUpdate() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -2108,7 +2108,7 @@ void testNonExistingCustomCertificate() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 1, 1)
+ resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -2149,7 +2149,7 @@ void testCertificateWithNonExistingDataCrt() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 1, 1)
+ resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -2191,7 +2191,7 @@ void testCertificateWithNonExistingDataKey() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build()
)
);
- resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 1, 1)
+ resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -2261,7 +2261,7 @@ void testMessagesTlsScramShaWithPredefinedPassword() {
KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build()
)
);
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
.editSpec()
.editKafka()
.withListeners(new GenericKafkaListenerBuilder()
@@ -2276,7 +2276,7 @@ void testMessagesTlsScramShaWithPredefinedPassword() {
.endSpec()
.build(),
kafkaUser,
- KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getUsername(), testStorage.getNamespaceName()).build()
+ KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getUsername(), testStorage.getClusterName()).build()
);
final String boostrapAddress = KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9096";
@@ -2374,7 +2374,7 @@ void testAdvertisedHostNamesAppearsInBrokerCerts() throws CertificateException {
)
);
resourceManager.createResourceWithWait(
- KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3, 3)
+ KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 3)
.editSpec()
.editKafka()
.withListeners(asList(
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java
index d60a938e42..9f4148a886 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java
@@ -166,10 +166,7 @@ private void runListenersTest(List listeners, String clusterName) {
)
);
// exercise phase
- resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(clusterName, 3)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
+ resourceManager.createResourceWithWait(KafkaTemplates.kafkaEphemeral(Environment.TEST_SUITE_NAMESPACE, clusterName, 3)
.editSpec()
.editKafka()
.withListeners(listeners)
@@ -180,7 +177,7 @@ private void runListenersTest(List listeners, String clusterName) {
// only on thread can access to verification phase (here is a lot of variables which can be modified in run-time (data-race))
synchronized (lock) {
String kafkaUsername = KafkaUserUtils.generateRandomNameOfKafkaUser();
- KafkaUser kafkaUserInstance = KafkaUserTemplates.tlsUser(Environment.TEST_SUITE_NAMESPACE, clusterName, kafkaUsername).build();
+ KafkaUser kafkaUserInstance = KafkaUserTemplates.tlsUser(Environment.TEST_SUITE_NAMESPACE, kafkaUsername, clusterName).build();
resourceManager.createResourceWithWait(kafkaUserInstance);
@@ -189,7 +186,7 @@ private void runListenersTest(List listeners, String clusterName) {
final String consumerName = "consumer-" + new Random().nextInt(Integer.MAX_VALUE);
String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(clusterName, topicName, Environment.TEST_SUITE_NAMESPACE).build());
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, topicName, clusterName).build());
boolean isTlsEnabled = listener.isTls();
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/log/LogSettingST.java b/systemtest/src/test/java/io/strimzi/systemtest/log/LogSettingST.java
index 39f5767864..f12fcfbd61 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/log/LogSettingST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/log/LogSettingST.java
@@ -205,8 +205,8 @@ void testKafkaLogSetting() {
Map brokerPods = PodUtils.podSnapshot(Environment.TEST_SUITE_NAMESPACE, brokerSelector);
Map controllerPods = PodUtils.podSnapshot(Environment.TEST_SUITE_NAMESPACE, controllerSelector);
- resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(LOG_SETTING_CLUSTER_NAME, testStorage.getTopicName(), Environment.TEST_SUITE_NAMESPACE).build());
- resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(Environment.TEST_SUITE_NAMESPACE, LOG_SETTING_CLUSTER_NAME, testStorage.getKafkaUsername()).build());
+ resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), LOG_SETTING_CLUSTER_NAME).build());
+ resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(Environment.TEST_SUITE_NAMESPACE, testStorage.getKafkaUsername(), LOG_SETTING_CLUSTER_NAME).build());
LOGGER.info("Checking if Kafka, ZooKeeper, TO and UO of cluster: {} has log level set properly", LOG_SETTING_CLUSTER_NAME);
StUtils.getKafkaConfigurationConfigMaps(Environment.TEST_SUITE_NAMESPACE, LOG_SETTING_CLUSTER_NAME)
@@ -276,10 +276,7 @@ void testKafkaLogSetting() {
void testConnectLogSetting() {
final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
- resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), Environment.TEST_SUITE_NAMESPACE, LOG_SETTING_CLUSTER_NAME, 1)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
+ resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnect(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), LOG_SETTING_CLUSTER_NAME, 1)
.editSpec()
.withNewInlineLogging()
.withLoggers(CONNECT_LOGGERS)
@@ -311,10 +308,7 @@ void testConnectLogSetting() {
void testMirrorMakerLogSetting() {
final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
- resourceManager.createResourceWithWait(KafkaMirrorMakerTemplates.kafkaMirrorMaker(testStorage.getClusterName(), LOG_SETTING_CLUSTER_NAME, GC_LOGGING_SET_NAME, "my-group", 1, false)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
+ resourceManager.createResourceWithWait(KafkaMirrorMakerTemplates.kafkaMirrorMaker(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), LOG_SETTING_CLUSTER_NAME, GC_LOGGING_SET_NAME, "my-group", 1, false)
.editSpec()
.withNewInlineLogging()
.withLoggers(MIRROR_MAKER_LOGGERS)
@@ -346,10 +340,7 @@ void testMirrorMakerLogSetting() {
void testMirrorMaker2LogSetting() {
final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
- resourceManager.createResourceWithWait(KafkaMirrorMaker2Templates.kafkaMirrorMaker2(testStorage.getClusterName(), LOG_SETTING_CLUSTER_NAME, GC_LOGGING_SET_NAME, 1, false)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
+ resourceManager.createResourceWithWait(KafkaMirrorMaker2Templates.kafkaMirrorMaker2(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), GC_LOGGING_SET_NAME, LOG_SETTING_CLUSTER_NAME, 1, false)
.editSpec()
.withNewInlineLogging()
.withLoggers(MIRROR_MAKER_LOGGERS)
@@ -383,10 +374,7 @@ void testMirrorMaker2LogSetting() {
void testBridgeLogSetting() {
final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
- resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(testStorage.getClusterName(), LOG_SETTING_CLUSTER_NAME, KafkaResources.plainBootstrapAddress(LOG_SETTING_CLUSTER_NAME), 1)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
+ resourceManager.createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), KafkaResources.plainBootstrapAddress(LOG_SETTING_CLUSTER_NAME), 1)
.editSpec()
.withNewInlineLogging()
.withLoggers(BRIDGE_LOGGERS)
@@ -582,10 +570,7 @@ void setup() {
)
);
- Kafka logSettingKafka = KafkaTemplates.kafkaPersistent(LOG_SETTING_CLUSTER_NAME, 3, 1)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
+ Kafka logSettingKafka = KafkaTemplates.kafkaPersistent(Environment.TEST_SUITE_NAMESPACE, LOG_SETTING_CLUSTER_NAME, 3, 1)
.editSpec()
.editKafka()
.withNewInlineLogging()
@@ -629,10 +614,7 @@ void setup() {
.build();
// deploying second Kafka here because of MM and MM2 tests
- Kafka gcLoggingKafka = KafkaTemplates.kafkaPersistent(GC_LOGGING_SET_NAME, 1, 1)
- .editMetadata()
- .withNamespace(Environment.TEST_SUITE_NAMESPACE)
- .endMetadata()
+ Kafka gcLoggingKafka = KafkaTemplates.kafkaPersistent(Environment.TEST_SUITE_NAMESPACE, GC_LOGGING_SET_NAME, 1, 1)
.editSpec()
.editKafka()
.withNewJvmOptions()
diff --git
a/systemtest/src/test/java/io/strimzi/systemtest/log/LoggingChangeST.java b/systemtest/src/test/java/io/strimzi/systemtest/log/LoggingChangeST.java index e3a9989cad..6477a4ff00 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/log/LoggingChangeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/log/LoggingChangeST.java @@ -205,7 +205,7 @@ void testJSONFormatLogging() { KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 3).build() ) ); - Kafka kafka = KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3) + Kafka kafka = KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 3, 3) .editOrNewSpec() .editKafka() //.withLogging(new ExternalLoggingBuilder().withName(configMapKafkaName).build()) @@ -278,7 +278,7 @@ void testDynamicallySetEOloggingLevels() { KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), testStorage.getClusterName(), 1).build() ) ); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 1, 1) + resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1) .editSpec() .editEntityOperator() .editTopicOperator() @@ -485,9 +485,9 @@ void testDynamicallySetBridgeLoggingLevels() { ); // create resources async resourceManager.createResourceWithoutWait( - KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 1, 1).build(), + KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1).build(), ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build(), - KafkaBridgeTemplates.kafkaBridge(testStorage.getClusterName(), KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()), 1) + KafkaBridgeTemplates.kafkaBridge(testStorage.getNamespaceName(), testStorage.getClusterName(), KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()), 1) .editSpec() .withLogging(ilOff) .endSpec() @@ -720,7 +720,7 @@ void testDynamicallyAndNonDynamicSetConnectLoggingLevels() { final Pattern log4jPatternInfoLevel = Pattern.compile("^(?[\\d-]+) (?