From d8a7fb2b7e9050b014cd2ec68a69b8fa02f47265 Mon Sep 17 00:00:00 2001 From: Giuseppe Villani Date: Fri, 14 Feb 2025 10:43:35 +0100 Subject: [PATCH] Fixes #3799: Apache Kafka procedures (#4172) * Fixes #3799: Apache Kafka procedures * removed unused stuff from procedures * code cleanup * removed code unused by procedures * Added docs and cleanup * fix tests * fix other tests * changed deps * added dep * upgraded deps * removed extradep * added procs in extended*.txt * cleanup * Revert "cleanup" This reverts commit 6a07e3f3999a05cb1fec1c999c8ca018c7c56182. * restored stuff * fixed tests * cleanup * resolved deps conflict error --- docs/asciidoc/modules/ROOT/nav.adoc | 1 + .../database-integration/kafka/cloud.adoc | 28 + .../kafka/consumer-configuration.adoc | 33 + .../database-integration/kafka/index.adoc | 73 + .../kafka/procedures.adoc | 286 ++++ .../kafka/producer-configuration.adoc | 39 + .../main/java/apoc/ExtendedApocConfig.java | 1 + .../apoc/ExtendedApocGlobalComponents.java | 68 +- .../main/kotlin/apoc/kafka/KafkaHandler.kt | 48 + .../kotlin/apoc/kafka/PublishProcedures.kt | 108 ++ .../kotlin/apoc/kafka/config/StreamsConfig.kt | 62 + .../kafka/consumer/StreamsEventConsumer.kt | 24 + .../apoc/kafka/consumer/StreamsEventSink.kt | 20 + .../StreamsEventSinkQueryExecution.kt | 32 + .../StreamsSinkConfigurationListener.kt | 29 + .../kafka/KafkaAutoCommitEventConsumer.kt | 138 ++ .../kafka/consumer/kafka/KafkaEventSink.kt | 48 + .../kafka/KafkaManualCommitEventConsumer.kt | 118 ++ .../consumer/kafka/KafkaSinkConfiguration.kt | 76 ++ .../procedures/StreamsSinkProcedures.kt | 118 ++ .../kafka/consumer/utils/ConsumerUtils.kt | 13 + .../kotlin/apoc/kafka/events/KafkaStatus.kt | 3 + .../kotlin/apoc/kafka/events/StreamsEvent.kt | 70 + .../apoc/kafka/extensions/CommonExtensions.kt | 83 ++ .../kafka/extensions/CoroutineExtensions.kt | 44 + .../DatabaseManagementServiceExtensions.kt | 28 + .../GraphDatabaseServerExtensions.kt | 32 + .../kotlin/apoc/kafka/producer/Extensions.kt | 82 ++ .../kafka/producer/RoutingConfiguration.kt | 251 ++++ .../StreamsEventRouterConfiguration.kt | 99 ++ .../StreamsRouterConfigurationListener.kt | 38 + .../producer/events/StreamsEventBuilder.kt | 298 +++++ .../kafka/producer/kafka/KafkaAdminService.kt | 57 + .../producer/kafka/KafkaConfiguration.kt | 105 ++ .../kafka/producer/kafka/KafkaEventRouter.kt | 194 +++ .../apoc/kafka/service/StreamsSinkService.kt | 42 + .../main/kotlin/apoc/kafka/service/Topics.kt | 127 ++ .../apoc/kafka/service/errors/ErrorService.kt | 105 ++ .../kafka/service/errors/KafkaErrorService.kt | 97 ++ .../sink/strategy/CUDIngestionStrategy.kt | 282 ++++ .../sink/strategy/IngestionStrategy.kt | 37 + .../strategy/NodePatternIngestionStrategy.kt | 91 ++ .../sink/strategy/PatternConfiguration.kt | 198 +++ .../RelationshipPatternIngestionStrategy.kt | 120 ++ .../sink/strategy/SchemaIngestionStrategy.kt | 185 +++ .../strategy/SourceIdIngestionStrategy.kt | 110 ++ .../main/kotlin/apoc/kafka/utils/JSONUtils.kt | 146 ++ .../main/kotlin/apoc/kafka/utils/KafkaUtil.kt | 341 +++++ .../src/main/resources/extendedCypher25.txt | 5 +- .../src/main/resources/extendedCypher5.txt | 5 +- .../test/java/apoc/util/ExtendedTestUtil.java | 24 + .../apoc/kafka/common/CommonExtensionsTest.kt | 74 + .../common/errors/KafkaErrorServiceTest.kt | 83 ++ .../strategy/CUDIngestionStrategyTest.kt | 1185 +++++++++++++++++ .../NodePatternIngestionStrategyTest.kt | 196 +++ .../strategy/PatternConfigurationTest.kt | 492 +++++++ ...elationshipPatternIngestionStrategyTest.kt | 196 +++ 
.../strategy/SchemaIngestionStrategyTest.kt | 496 +++++++ .../strategy/SourceIdIngestionStrategyTest.kt | 331 +++++ .../apoc/kafka/common/support/Assert.kt | 37 + .../kafka/common/support/KafkaTestUtils.kt | 62 + .../common/support/Neo4jContainerExtension.kt | 178 +++ .../kafka/common/utils/CoroutineUtilsTest.kt | 63 + .../apoc/kafka/common/utils/Neo4jUtilsTest.kt | 23 + .../kafka/common/utils/ProcedureUtilsTest.kt | 22 + .../kafka/common/utils/SchemaUtilsTest.kt | 131 ++ .../kafka/common/utils/StreamsUtilsTest.kt | 35 + .../kafka/KafkaConsumeProceduresTSE.kt | 189 +++ .../consumer/kafka/KafkaEventSinkBaseTSE.kt | 117 ++ .../consumer/kafka/KafkaEventSinkSuiteIT.kt | 101 ++ .../producer/RoutingConfigurationTest.kt | 348 +++++ .../events/StreamsEventBuilderTest.kt | 409 ++++++ .../integrations/KafkaEventRouterBaseTSE.kt | 78 ++ .../KafkaEventRouterProcedureTSE.kt | 289 ++++ .../integrations/KafkaEventRouterSuiteIT.kt | 53 + .../KafkaEventRouterTestCommon.kt | 53 + .../producer/kafka/KafkaConfigurationTest.kt | 52 + .../apoc/nlp/aws/AWSProceduresAPITest.kt | 0 extra-dependencies/kafka/build.gradle | 31 + .../kafka/gradle/wrapper/gradle-wrapper.jar | Bin 0 -> 54212 bytes .../gradle/wrapper/gradle-wrapper.properties | 6 + extra-dependencies/kafka/gradlew | 183 +++ extra-dependencies/kafka/gradlew.bat | 100 ++ extra-dependencies/settings.gradle | 1 + 84 files changed, 10256 insertions(+), 20 deletions(-) create mode 100644 docs/asciidoc/modules/ROOT/pages/database-integration/kafka/cloud.adoc create mode 100644 docs/asciidoc/modules/ROOT/pages/database-integration/kafka/consumer-configuration.adoc create mode 100644 docs/asciidoc/modules/ROOT/pages/database-integration/kafka/index.adoc create mode 100644 docs/asciidoc/modules/ROOT/pages/database-integration/kafka/procedures.adoc create mode 100644 docs/asciidoc/modules/ROOT/pages/database-integration/kafka/producer-configuration.adoc create mode 100644 extended/src/main/kotlin/apoc/kafka/KafkaHandler.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/PublishProcedures.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/config/StreamsConfig.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/consumer/StreamsEventConsumer.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/consumer/StreamsEventSink.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/consumer/StreamsEventSinkQueryExecution.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/consumer/StreamsSinkConfigurationListener.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaAutoCommitEventConsumer.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaEventSink.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaManualCommitEventConsumer.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaSinkConfiguration.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/consumer/procedures/StreamsSinkProcedures.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/consumer/utils/ConsumerUtils.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/events/KafkaStatus.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/events/StreamsEvent.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/extensions/CommonExtensions.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/extensions/CoroutineExtensions.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/extensions/DatabaseManagementServiceExtensions.kt create mode 100644 
extended/src/main/kotlin/apoc/kafka/extensions/GraphDatabaseServerExtensions.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/producer/Extensions.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/producer/RoutingConfiguration.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/producer/StreamsEventRouterConfiguration.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/producer/StreamsRouterConfigurationListener.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/producer/events/StreamsEventBuilder.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/producer/kafka/KafkaAdminService.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/producer/kafka/KafkaConfiguration.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/producer/kafka/KafkaEventRouter.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/service/StreamsSinkService.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/service/Topics.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/service/errors/ErrorService.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/service/errors/KafkaErrorService.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/service/sink/strategy/CUDIngestionStrategy.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/service/sink/strategy/IngestionStrategy.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/service/sink/strategy/NodePatternIngestionStrategy.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/service/sink/strategy/PatternConfiguration.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/service/sink/strategy/RelationshipPatternIngestionStrategy.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/service/sink/strategy/SchemaIngestionStrategy.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/service/sink/strategy/SourceIdIngestionStrategy.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/utils/JSONUtils.kt create mode 100644 extended/src/main/kotlin/apoc/kafka/utils/KafkaUtil.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/CommonExtensionsTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/errors/KafkaErrorServiceTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/strategy/CUDIngestionStrategyTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/strategy/NodePatternIngestionStrategyTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/strategy/PatternConfigurationTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/strategy/RelationshipPatternIngestionStrategyTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/strategy/SchemaIngestionStrategyTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/strategy/SourceIdIngestionStrategyTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/support/Assert.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/support/KafkaTestUtils.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/support/Neo4jContainerExtension.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/utils/CoroutineUtilsTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/utils/Neo4jUtilsTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/utils/ProcedureUtilsTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/utils/SchemaUtilsTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/common/utils/StreamsUtilsTest.kt create 
mode 100644 extended/src/test/kotlin/apoc/kafka/consumer/kafka/KafkaConsumeProceduresTSE.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/consumer/kafka/KafkaEventSinkBaseTSE.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/consumer/kafka/KafkaEventSinkSuiteIT.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/producer/RoutingConfigurationTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/producer/events/StreamsEventBuilderTest.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterBaseTSE.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterProcedureTSE.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterSuiteIT.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterTestCommon.kt create mode 100644 extended/src/test/kotlin/apoc/kafka/producer/kafka/KafkaConfigurationTest.kt delete mode 100644 extended/src/test/kotlin/apoc/nlp/aws/AWSProceduresAPITest.kt create mode 100644 extra-dependencies/kafka/build.gradle create mode 100644 extra-dependencies/kafka/gradle/wrapper/gradle-wrapper.jar create mode 100644 extra-dependencies/kafka/gradle/wrapper/gradle-wrapper.properties create mode 100755 extra-dependencies/kafka/gradlew create mode 100644 extra-dependencies/kafka/gradlew.bat diff --git a/docs/asciidoc/modules/ROOT/nav.adoc b/docs/asciidoc/modules/ROOT/nav.adoc index 13b1053a79..86917e75e8 100644 --- a/docs/asciidoc/modules/ROOT/nav.adoc +++ b/docs/asciidoc/modules/ROOT/nav.adoc @@ -46,6 +46,7 @@ include::partial$generated-documentation/nav.adoc[] ** xref::database-integration/load-ldap.adoc[] ** xref::database-integration/redis.adoc[] ** xref::database-integration/vectordb/index.adoc[] + ** xref::database-integration/kafka/index.adoc[] * xref:graph-updates/index.adoc[] ** xref::graph-updates/uuid.adoc[] diff --git a/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/cloud.adoc b/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/cloud.adoc new file mode 100644 index 0000000000..1965cf37cb --- /dev/null +++ b/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/cloud.adoc @@ -0,0 +1,28 @@ + += Confluent Cloud + +[[confluent_cloud]] +Configuring a connection to a Confluent Cloud instance should follow +link:{url-confluent-java-client}[Confluent's Java Client] configuration advice. +At a minimum, to configure this, you will need: + +* `BOOTSTRAP_SERVER_URL` +* `API_KEY` +* `API_SECRET` + +More specifically, the procedures have to be configured as follows: + +.neo4j.conf +[source,ini] +---- +apoc.kafka.bootstrap.servers=${BOOTSTRAP_SERVER_URL} +apoc.kafka.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="${API_KEY}" password="${API_SECRET}"; +apoc.kafka.ssl.endpoint.identification.algorithm=https +apoc.kafka.security.protocol=SASL_SSL +apoc.kafka.sasl.mechanism=PLAIN +apoc.kafka.request.timeout.ms=20000 +apoc.kafka.retry.backoff.ms=500 +---- + +Make sure to replace `BOOTSTRAP_SERVER_URL`, `API_SECRET`, and `API_KEY` with the values that Confluent Cloud +gives you when you generate an API access key.
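Once these settings are in place (together with `apoc.kafka.enabled=true`), a quick way to verify the connection from Cypher is a publish/consume round trip. This is only a minimal sketch: it assumes a topic named `my-topic` already exists in your Confluent Cloud cluster (or that topic auto-creation is enabled).

.Connection smoke test (hypothetical topic name)
[source,cypher]
----
// publish a test message through the producer configured above
CALL apoc.kafka.publish('my-topic', 'Confluent Cloud connection test');

// read it back; the timeout is expressed in milliseconds
CALL apoc.kafka.consume('my-topic', {timeout: 5000}) YIELD event
RETURN event;
----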
diff --git a/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/consumer-configuration.adoc b/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/consumer-configuration.adoc new file mode 100644 index 0000000000..e3a24d0f7d --- /dev/null +++ b/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/consumer-configuration.adoc @@ -0,0 +1,33 @@ +=== Configuration summary + +You can set the following Kafka configuration values in your `neo4j.conf`; the defaults are shown below. + +.neo4j.conf +[source,subs="verbatim,attributes"] +---- +apoc.kafka.bootstrap.servers=localhost:9092 +apoc.kafka.auto.offset.reset=earliest +apoc.kafka.group.id=neo4j +apoc.kafka.enable.auto.commit=true +apoc.kafka.key.deserializer=org.apache.kafka.common.serialization.ByteArrayDeserializer +apoc.kafka.value.deserializer=org.apache.kafka.common.serialization.ByteArrayDeserializer + +{environment}.topic.cypher.= +{environment}.topic.cdc.sourceId= +{environment}.topic.cdc.schema= +{environment}.topic.cud= +{environment}.topic.pattern.node.= +{environment}.topic.pattern.relationship.= +{environment}.enabled= + +---- + +See the https://kafka.apache.org/documentation/#brokerconfigs[Apache Kafka documentation] for details on these settings. + +[NOTE] +If `apoc.kafka.cluster.only` is set to true, APOC Kafka will refuse to start in single instance mode, +or when run in the context of the backup operation. This is an important safety guard to ensure that operations do not occur in unexpected situations for production deployments. + diff --git a/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/index.adoc b/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/index.adoc new file mode 100644 index 0000000000..a2d5d2dde8 --- /dev/null +++ b/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/index.adoc @@ -0,0 +1,73 @@ += Kafka + +[[kafka]] + + +[[apoc_neo4j_plugin_quickstart]] +== APOC Kafka Procedures + +NOTE: To enable the Kafka dependencies, you need to set the APOC configuration `apoc.kafka.enabled=true`. + +Any configuration option that starts with `apoc.kafka.` controls how the procedures themselves behave. + +=== Install dependencies + +The Kafka dependencies are included in https://github.com/neo4j-contrib/neo4j-apoc-procedures/releases/download/{apoc-release}/apoc-kafka-dependencies-{apoc-release}-all.jar[apoc-kafka-dependencies-{apoc-release}-all.jar^], which can be downloaded from the https://github.com/neo4j-contrib/neo4j-apoc-procedures/releases/tag/{apoc-release}[releases page^]. +Once that file is downloaded, it should be placed in the `plugins` directory and the Neo4j Server restarted. + +[[kafka-settings]] +=== Kafka settings + +Any configuration option that starts with `apoc.kafka.` will be passed to the underlying Kafka driver. The Neo4j +Kafka procedures use the official Confluent Kafka producer and consumer Java clients. +Configuration settings which are valid for those clients will also work for APOC Kafka. + +For example, in the Kafka documentation linked below, the configuration setting named `batch.size` should be stated as +`apoc.kafka.batch.size` in APOC Kafka. + +The following are common configuration settings you may wish to use. +.Most commonly needed configuration settings +|=== +|Setting Name |Description |Default Value + +|apoc.kafka.max.poll.records +|The maximum number of records to pull per batch from Kafka.
Increasing this number will mean +larger transactions in Neo4j memory and may improve throughput. +|500 + +|apoc.kafka.buffer.memory +|The total bytes of memory the producer can use to buffer records waiting. Use this to adjust +how much memory the procedures may require to hold messages not yet delivered to Neo4j. +|33554432 + +|apoc.kafka.batch.size +|(Producer only) The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes. +|16384 + +|apoc.kafka.max.partition.fetch.bytes +|(Consumer only) The maximum amount of data per-partition the server will return. Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. +|1048576 + +|apoc.kafka.group.id +|A unique string that identifies the consumer group this consumer belongs to. +|N/A +|=== + +=== Configure Kafka Connection + +If you are running locally or against a standalone machine, configure `apoc.conf` to point to that server: + +.neo4j.conf +[source,ini] +---- +apoc.kafka.bootstrap.servers=localhost:9092 +---- + +If you are using Confluent Cloud (managed Kafka), you can connect to Kafka as described in +the xref:database-integration/kafka/cloud.adoc#confluent_cloud[Confluent Cloud] section. + + +==== Restart Neo4j + +Once the plugin is installed and configured, restarting the database will make it active. +If you have configured Neo4j to consume from Kafka, it will begin processing messages immediately. \ No newline at end of file diff --git a/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/procedures.adoc b/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/procedures.adoc new file mode 100644 index 0000000000..00198fbf8c --- /dev/null +++ b/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/procedures.adoc @@ -0,0 +1,286 @@ += APOC Kafka - Procedures +:environment: apoc.kafka + +ifdef::env-docs[] +[abstract] +-- +This chapter describes the procedures provided by the APOC Kafka integration. +Use this section to learn how these procedures allow the Kafka functionality +to be used ad hoc in any Cypher query. +-- +endif::env-docs[] + +APOC Kafka comes with a set of procedures for publishing to and consuming from Kafka topics. + +== Configuration + +You can enable/disable the procedures by changing this setting in your `neo4j.conf`: + +.neo4j.conf +[source,subs="verbatim,attributes"] +---- +{environment}.procedures.enabled= +---- + +[NOTE] +==== +Please note that by default the `dbms.security.procedures.whitelist` property is disabled, so Neo4j will load all +procedures found. +If you enable it, you must also declare a comma-separated list of the procedures to be loaded.
For example: + +[source, properties] +---- +dbms.security.procedures.whitelist=apoc.* +---- + +If you try to CALL one of the APOC Kafka procedures without declaring it in the whitelist, you will receive an error like +the following: + +image::ROOT:procedure_not_found.png[title="APOC Kafka procedure not found", align="center"] +==== + +=== Multi Database Support + +Neo4j 4.0 Enterprise has https://neo4j.com/docs/operations-manual/4.0/manage-databases/[multi-tenancy support]. +In order to support this feature, you can scope each property to a database instance by appending the database name +as a suffix to the properties in your neo4j.conf file. + +So, to enable the APOC Kafka procedures, the following property should be added: + +.neo4j.conf +[source,subs="verbatim"] +---- +apoc.kafka.procedures.enabled.= +---- + +So if you have an instance named `foo`, you can specify the configuration in this way: + +.neo4j.conf +[source] +---- +apoc.kafka.procedures.enabled.foo= +---- + +The old property: + +.neo4j.conf +[source] +---- +apoc.kafka.procedures.enabled= +---- + +is still valid, and it refers to Neo4j's default db instance. + +In particular, the following property will be used as the default value +for non-default db instances, in case the specific configuration parameter is not provided: + +[source] +---- +apoc.kafka.procedures.enabled= +---- + +== apoc.kafka.publish + +This procedure allows custom message streaming from Neo4j to the configured environment by using the underlying configured producer. + +Usage: + +`CALL apoc.kafka.publish('my-topic', 'Hello World from Neo4j!')` + +The message retrieved from the consumer is the following: + +`{"payload":"Hello World from Neo4j!"}` + +If you use a local Docker Compose setup, you can check for these messages with: + +`docker exec -it kafka kafka-console-consumer --topic my-topic --bootstrap-server kafka:9092` + +Input Parameters: + +[cols="3*",options="header"] +|=== +|Variable Name +|Type +|Description + +|`topic` +|String +|The topic where you want to publish the data + +|`payload` +|Object +|The data that you want to stream + +|=== + +Configuration parameters: +[cols="3*",options="header"] +|=== +|Name +|Type +|Description + +|`key` +|Object +|The key of the message that you want to stream. Please note that if the key doesn't exist, you get a message with a random UUID as the key value + +|`partition` +|Int +|The partition of the message that you want to stream + +|=== + +You can send any kind of data in the payload: nodes, relationships, paths, lists, maps, scalar values and nested versions thereof. + +For nodes and relationships, if the topic is defined in the patterns provided by the configuration, their properties will be filtered according to that configuration. + + +== apoc.kafka.publish.sync + +Similar to the `apoc.kafka.publish` procedure, but synchronous. + +Usage: + +`CALL apoc.kafka.publish.sync('my-topic', 'my-payload', {}) YIELD value RETURN value` + +This procedure returns a `RecordMetadata` value like this: `{"timestamp": 1, "offset": 2, "partition": 3, "keySize": 4, "valueSize": 5}` + +[cols="2*",options="header"] +|=== +|Variable Name +|Description + +|`timestamp` +|The timestamp of the record in the topic/partition. + +|`offset` +|The offset of the record in the topic/partition.
+ +|`partition` +|The partition the record was sent to + +|`keySize` +|The size of the serialized, uncompressed key in bytes + +|`valueSize` +|The size of the serialized, uncompressed value in bytes +|=== + +== apoc.kafka.consume + +This procedure allows you to consume messages from a given topic. + +Usage: + +`CALL apoc.kafka.consume('my-topic', {}) YIELD event RETURN event` + +Example: +Imagine you have a producer that publishes events like `{"name": "Andrea", "surname": "Santurbano"}`; we can create user nodes in this way: + +[source,cypher] +---- +CALL apoc.kafka.consume('my-topic') YIELD event +CREATE (p:Person{firstName: event.data.name, lastName: event.data.surname}) +---- + +If you want to read from a specific offset of a topic partition, you can do it by executing the following query: + +[source,cypher] +---- +CALL apoc.kafka.consume('my-topic', {timeout: 5000, partitions: [{partition: 0, offset: 30}]}) YIELD event +CREATE (p:Person{firstName: event.data.name, lastName: event.data.surname}) +---- + +Input Parameters: + +[cols="3*",options="header"] +|=== +|Variable Name +|Type +|Description + +|`topic` +|String +|The topic from which you want to consume the data + +|`config` +|Map +|The configuration parameters + +|=== + +=== Available configuration parameters + +[cols="3*",options="header"] +|=== +|Variable Name +|Type +|Description + +|`timeout` +|Number (default `1000`) +|The time, in milliseconds, that the procedure listens to the topic + +|`from` +|String +|It's the Kafka configuration parameter `auto.offset.reset`. +If not specified, it inherits the underlying `apoc.kafka.auto.offset.reset` value + +|`groupId` +|String +|It's the Kafka configuration parameter `group.id`. +If not specified, it inherits the underlying `apoc.kafka.group.id` value + +|`autoCommit` +|Boolean (default `true`) +|It's the Kafka configuration parameter `enable.auto.commit`. +If not specified, it inherits the underlying `apoc.kafka.enable.auto.commit` value + +|`commit` +|Boolean (default `true`) +|When `autoCommit` is set to `false`, decides whether to commit the data. + +|`broker` +|String +|The comma-separated string of Kafka broker URLs. +If not specified, it inherits the underlying `apoc.kafka.bootstrap.servers` value + +|`partitions` +|List<Map<String,Number>> +|A list of maps containing the partition and offset information needed to start reading from a specific position (see the Partitions section below) + +|`keyDeserializer` +|String +|The deserializer for the Kafka record key. +If not specified, it inherits the underlying `apoc.kafka.key.deserializer` value. +Supported deserializers are: `org.apache.kafka.common.serialization.ByteArrayDeserializer` and `io.confluent.kafka.serializers.KafkaAvroDeserializer` + +|`valueDeserializer` +|String +|The deserializer for the Kafka record value. +If not specified, it inherits the underlying `apoc.kafka.value.deserializer` value. +Supported deserializers are: `org.apache.kafka.common.serialization.ByteArrayDeserializer` and `io.confluent.kafka.serializers.KafkaAvroDeserializer` + +|`schemaRegistryUrl` +|String +|The Schema Registry URL, required in case you are dealing with AVRO messages.
+ +|=== + +=== Partitions + +[cols="3*",options="header"] +|=== +|Variable Name +|Type +|Description + +|`partition` +|Number +|The Kafka partition number to read from + +|`offset` +|Number +|The offset from which to start reading the topic partition + +|=== diff --git a/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/producer-configuration.adoc b/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/producer-configuration.adoc new file mode 100644 index 0000000000..2db49d2498 --- /dev/null +++ b/docs/asciidoc/modules/ROOT/pages/database-integration/kafka/producer-configuration.adoc @@ -0,0 +1,39 @@ +== Configuration + +You can set the following configuration values in your `neo4j.conf`; the defaults are shown below. + +.neo4j.conf (with default values) +[source] +---- +apoc.kafka.bootstrap.servers=localhost:9092 +apoc.kafka.acks=1 +apoc.kafka.retries=2 +apoc.kafka.batch.size=16384 +apoc.kafka.buffer.memory=33554432 +apoc.kafka.reindex.batch.size=1000 +apoc.kafka.session.timeout.ms=15000 +apoc.kafka.connection.timeout.ms=10000 +apoc.kafka.replication=1 +apoc.kafka.linger.ms=1 +apoc.kafka.transactional.id= +apoc.kafka.topic.discovery.polling.interval=300000 +apoc.kafka.log.compaction.strategy=delete +---- + +[NOTE] +==== +**To use Kafka transactions, please set `apoc.kafka.transactional.id` and `apoc.kafka.acks` appropriately** (an illustrative example is shown at the end of this section). +Check out this {url-confluent-blog}/transactions-apache-kafka/[blog post] for further details about transactions in Apache Kafka. +==== + +See the https://kafka.apache.org/documentation/#brokerconfigs[Apache Kafka documentation] for details on these settings. + +If your Kafka broker is configured with `auto.create.topics.enable` set to `false`, +all the messages sent to topics that don't exist are discarded; +this is because the `KafkaProducer.send()` method would otherwise block the execution, as explained in https://issues.apache.org/jira/browse/KAFKA-3539[KAFKA-3539]. +You can tune the custom property `apoc.kafka.topic.discovery.polling.interval` in order to +periodically check for new topics in the Kafka cluster so that the procedures will be able +to send events to the defined topics.
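As a concrete illustration of the transactional settings mentioned in the note above, a producer configuration might look like the following. This is only a sketch: the identifier value is hypothetical, and `acks=all` reflects the general Kafka requirement for idempotent/transactional producers rather than anything specific to these procedures.

.neo4j.conf (illustrative transactional producer settings)
[source]
----
apoc.kafka.transactional.id=neo4j-apoc-producer
apoc.kafka.acks=all
----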
+ + + diff --git a/extended/src/main/java/apoc/ExtendedApocConfig.java b/extended/src/main/java/apoc/ExtendedApocConfig.java index 191da04571..0e324c09f4 100644 --- a/extended/src/main/java/apoc/ExtendedApocConfig.java +++ b/extended/src/main/java/apoc/ExtendedApocConfig.java @@ -44,6 +44,7 @@ public class ExtendedApocConfig extends LifecycleAdapter public static final String APOC_ML_WATSON_URL = "apoc.ml.watson.url"; public static final String APOC_AWS_KEY_ID = "apoc.aws.key.id"; public static final String APOC_AWS_SECRET_KEY = "apoc.aws.secret.key"; + public static final String APOC_KAFKA_ENABLED = "apoc.kafka.enabled"; public enum UuidFormatType { hex, base64 } // These were earlier added via the Neo4j config using the ApocSettings.java class diff --git a/extended/src/main/java/apoc/ExtendedApocGlobalComponents.java b/extended/src/main/java/apoc/ExtendedApocGlobalComponents.java index e103917264..daf064fef3 100644 --- a/extended/src/main/java/apoc/ExtendedApocGlobalComponents.java +++ b/extended/src/main/java/apoc/ExtendedApocGlobalComponents.java @@ -11,12 +11,18 @@ import org.neo4j.kernel.availability.AvailabilityListener; import org.neo4j.kernel.internal.GraphDatabaseAPI; import org.neo4j.kernel.lifecycle.Lifecycle; +import org.neo4j.logging.Log; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import static apoc.ExtendedApocConfig.APOC_KAFKA_ENABLED; + @ServiceProvider public class ExtendedApocGlobalComponents implements ApocGlobalComponents { @@ -31,35 +37,61 @@ public Map getServices(GraphDatabaseAPI db, ApocExtensionFact dependencies.globalProceduresRegistry() ); - return Map.of( + Map serviceMap = new HashMap<>(); + serviceMap.put("ttl", new TTLLifeCycle(dependencies.scheduler(), + db, + TTLConfig.ttlConfig(), + dependencies.log().getUserLog(TTLLifeCycle.class))); - "ttl", new TTLLifeCycle(dependencies.scheduler(), - db, - TTLConfig.ttlConfig(), - dependencies.log().getUserLog(TTLLifeCycle.class)), + serviceMap.put("uuid", new UuidHandler(db, + dependencies.databaseManagementService(), + dependencies.log().getUserLog(Uuid.class), + dependencies.apocConfig(), + dependencies.scheduler(), + dependencies.pools())); - "uuid", new UuidHandler(db, - dependencies.databaseManagementService(), - dependencies.log().getUserLog(Uuid.class), - dependencies.apocConfig(), - dependencies.scheduler(), - dependencies.pools()), + serviceMap.put("directory", new LoadDirectoryHandler(db, + dependencies.log().getUserLog(LoadDirectory.class), + dependencies.pools())); - "directory", new LoadDirectoryHandler(db, - dependencies.log().getUserLog(LoadDirectory.class), - dependencies.pools()), + serviceMap.put("cypherProcedures", cypherProcedureHandler); + + // add kafkaHandler only if apoc.kafka.enabled=true + boolean isKafkaEnabled = dependencies.apocConfig().getConfig().getBoolean(APOC_KAFKA_ENABLED, false); + if (isKafkaEnabled) { + try { + Class kafkaHandlerClass = Class.forName("apoc.kafka.KafkaHandler"); + Lifecycle kafkaHandler = (Lifecycle) kafkaHandlerClass + .getConstructor(GraphDatabaseAPI.class, Log.class) + .newInstance(db, dependencies.log().getUserLog(kafkaHandlerClass)); + + serviceMap.put("kafkaHandler", kafkaHandler); + } catch (Exception e) { + dependencies.log().getUserLog(ExtendedApocGlobalComponents.class) + .warn(""" + Cannot find the Kafka extra jar. + Please put the apoc-kafka-dependencies-5.x.x-all.jar into plugin folder. 
+ See the documentation: https://neo4j.com/labs/apoc/5/overview/apoc.kakfa"""); + } + } + + return serviceMap; - "cypherProcedures", cypherProcedureHandler - ); } @Override public Collection getContextClasses() { - return List.of(CypherProceduresHandler.class, UuidHandler.class, LoadDirectoryHandler.class); + List contextClasses = new ArrayList<>( + Arrays.asList(CypherProceduresHandler.class, UuidHandler.class, LoadDirectoryHandler.class) + ); + try { + contextClasses.add(Class.forName("apoc.kafka.KafkaHandler")); + } catch (ClassNotFoundException ignored) {} + return contextClasses; } @Override public Iterable getListeners(GraphDatabaseAPI db, ApocExtensionFactory.Dependencies dependencies) { return Collections.emptyList(); } -} +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/KafkaHandler.kt b/extended/src/main/kotlin/apoc/kafka/KafkaHandler.kt new file mode 100644 index 0000000000..b508410a66 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/KafkaHandler.kt @@ -0,0 +1,48 @@ +package apoc.kafka + +import apoc.ApocConfig +import apoc.ExtendedApocConfig.APOC_KAFKA_ENABLED +import apoc.kafka.config.StreamsConfig +import apoc.kafka.consumer.StreamsSinkConfigurationListener +import apoc.kafka.producer.StreamsRouterConfigurationListener +import org.neo4j.kernel.internal.GraphDatabaseAPI +import org.neo4j.kernel.lifecycle.LifecycleAdapter +import org.neo4j.logging.Log + +class KafkaHandler(): LifecycleAdapter() { + + private lateinit var db: GraphDatabaseAPI + private lateinit var log: Log + + constructor(db: GraphDatabaseAPI, log: Log) : this() { + this.db = db + this.log = log + } + + override fun start() { + if(ApocConfig.apocConfig().getBoolean(APOC_KAFKA_ENABLED)) { + + try { + StreamsRouterConfigurationListener(db, log) + .start(StreamsConfig.getConfiguration()) + } catch (e: Exception) { + log.error("Exception in StreamsRouterConfigurationListener {}", e.message) + } + + try { + StreamsSinkConfigurationListener(db, log) + .start(StreamsConfig.getConfiguration()) + } catch (e: Exception) { + log.error("Exception in StreamsSinkConfigurationListener {}", e.message) + } + } + } + + override fun stop() { + if(ApocConfig.apocConfig().getBoolean(APOC_KAFKA_ENABLED)) { + + StreamsRouterConfigurationListener(db, log).shutdown() + StreamsSinkConfigurationListener(db, log).shutdown() + } + } +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/PublishProcedures.kt b/extended/src/main/kotlin/apoc/kafka/PublishProcedures.kt new file mode 100644 index 0000000000..4cc4764c0d --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/PublishProcedures.kt @@ -0,0 +1,108 @@ +package apoc.kafka + +import apoc.kafka.producer.events.StreamsEventBuilder +import apoc.kafka.producer.kafka.KafkaEventRouter +import apoc.kafka.utils.KafkaUtil +import apoc.kafka.utils.KafkaUtil.checkEnabled +import kotlinx.coroutines.runBlocking +import org.neo4j.kernel.internal.GraphDatabaseAPI +import org.neo4j.logging.Log +import org.neo4j.procedure.Context +import org.neo4j.procedure.Description +import org.neo4j.procedure.Mode +import org.neo4j.procedure.Name +import org.neo4j.procedure.Procedure +import java.util.concurrent.ConcurrentHashMap +import java.util.stream.Stream + +data class StreamPublishResult(@JvmField val value: Map) + +data class StreamsEventSinkStoreEntry(val eventRouter: KafkaEventRouter, +) +class PublishProcedures { + + @JvmField @Context + var db: GraphDatabaseAPI? = null + + @JvmField @Context var log: Log? 
= null + + @Procedure(mode = Mode.READ, name = "apoc.kafka.publish.sync") + @Description("apoc.kafka.publish.sync(topic, payload, config) - Allows custom synchronous streaming from Neo4j to the configured stream environment") + fun sync(@Name("topic") topic: String?, @Name("payload") payload: Any?, + @Name(value = "config", defaultValue = "{}") config: Map?): Stream { + checkEnabled() + if (isTopicNullOrEmpty(topic)) { + return Stream.empty() + } + checkPayloadNotNull(payload) + + val streamsEvent = buildStreamEvent(topic!!, payload!!) + return getStreamsEventSinkStoreEntry().eventRouter + .sendEventsSync(topic, listOf(streamsEvent), config ?: emptyMap()) + .map { StreamPublishResult(it) } + .stream() + } + + @Procedure(mode = Mode.READ, name = "apoc.kafka.publish") + @Description("apoc.kafka.publish(topic, payload, config) - Allows custom streaming from Neo4j to the configured stream environment") + fun publish(@Name("topic") topic: String?, @Name("payload") payload: Any?, + @Name(value = "config", defaultValue = "{}") config: Map?) = runBlocking { + checkEnabled() + if (isTopicNullOrEmpty(topic)) { + return@runBlocking + } + checkPayloadNotNull(payload) + + val streamsEvent = buildStreamEvent(topic!!, payload!!) + getStreamsEventSinkStoreEntry().eventRouter.sendEvents(topic, listOf(streamsEvent), config ?: emptyMap()) + } + + private fun isTopicNullOrEmpty(topic: String?): Boolean { + return if (topic.isNullOrEmpty()) { + log?.info("Topic empty, no message sent") + true + } else { + false + } + } + + private fun checkPayloadNotNull(payload: Any?) { + if (payload == null) { + log?.error("Payload empty, no message sent") + throw RuntimeException("Payload may not be null") + } + } + + private fun buildStreamEvent(topic: String, payload: Any) = StreamsEventBuilder() + .withPayload(payload) + .withNodeRoutingConfiguration(getStreamsEventSinkStoreEntry() + .eventRouter + .eventRouterConfiguration + .nodeRouting + .firstOrNull { it.topic == topic }) + .withRelationshipRoutingConfiguration(getStreamsEventSinkStoreEntry() + .eventRouter + .eventRouterConfiguration + .relRouting + .firstOrNull { it.topic == topic }) + .withTopic(topic) + .build() + + private fun getStreamsEventSinkStoreEntry() = streamsEventRouterStore[db!!.databaseName()]!! 
+ + companion object { + + private val streamsEventRouterStore = ConcurrentHashMap() + + fun register( + db: GraphDatabaseAPI, + evtRouter: KafkaEventRouter, + ) { + streamsEventRouterStore[KafkaUtil.getName(db)] = StreamsEventSinkStoreEntry(evtRouter/*, txHandler*/) + } + + fun unregister(db: GraphDatabaseAPI) { + streamsEventRouterStore.remove(KafkaUtil.getName(db)) + } + } +} diff --git a/extended/src/main/kotlin/apoc/kafka/config/StreamsConfig.kt b/extended/src/main/kotlin/apoc/kafka/config/StreamsConfig.kt new file mode 100644 index 0000000000..67a9b07e7d --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/config/StreamsConfig.kt @@ -0,0 +1,62 @@ +package apoc.kafka.config + +import apoc.ApocConfig +import org.apache.commons.configuration2.ConfigurationMap +import org.apache.kafka.clients.consumer.ConsumerConfig + +class StreamsConfig { + + companion object { + + fun getConfiguration(additionalConfigs: Map = emptyMap()): Map { + val config = ApocConfig.apocConfig().config + + val map = ConfigurationMap(config) + .filter { it.value is String } + .toMutableMap() as Map + return convert(map, additionalConfigs) + } + + const val SOURCE_ENABLED = "apoc.kafka.source.enabled" + const val SOURCE_ENABLED_VALUE = true + const val PROCEDURES_ENABLED = "apoc.kafka.procedures.enabled" + const val PROCEDURES_ENABLED_VALUE = true + const val SINK_ENABLED = "apoc.kafka.sink.enabled" + const val SINK_ENABLED_VALUE = false + const val CHECK_APOC_TIMEOUT = "apoc.kafka.check.apoc.timeout" + const val CHECK_APOC_INTERVAL = "apoc.kafka.check.apoc.interval" + const val CLUSTER_ONLY = "apoc.kafka.cluster.only" + const val CHECK_WRITEABLE_INSTANCE_INTERVAL = "apoc.kafka.check.writeable.instance.interval" + const val POLL_INTERVAL = "apoc.kafka.sink.poll.interval" + const val INSTANCE_WAIT_TIMEOUT = "apoc.kafka.wait.timeout" + const val INSTANCE_WAIT_TIMEOUT_VALUE = 120000L + + fun isSourceGloballyEnabled(config: Map) = config.getOrDefault(SOURCE_ENABLED, SOURCE_ENABLED_VALUE).toString().toBoolean() + + fun isSourceEnabled(config: Map, dbName: String) = config.getOrDefault("${SOURCE_ENABLED}.from.$dbName", isSourceGloballyEnabled(config)).toString().toBoolean() + + fun hasProceduresGloballyEnabled(config: Map) = config.getOrDefault(PROCEDURES_ENABLED, PROCEDURES_ENABLED_VALUE).toString().toBoolean() + + fun hasProceduresEnabled(config: Map, dbName: String) = config.getOrDefault("${PROCEDURES_ENABLED}.$dbName", hasProceduresGloballyEnabled(config)).toString().toBoolean() + + fun isSinkGloballyEnabled(config: Map) = config.getOrDefault(SINK_ENABLED, SINK_ENABLED_VALUE).toString().toBoolean() + + fun isSinkEnabled(config: Map, dbName: String) = config.getOrDefault("${SINK_ENABLED}.to.$dbName", isSinkGloballyEnabled(config)).toString().toBoolean() + + fun getInstanceWaitTimeout(config: Map) = config.getOrDefault(INSTANCE_WAIT_TIMEOUT, INSTANCE_WAIT_TIMEOUT_VALUE).toString().toLong() + + fun convert(props: Map, config: Map): Map { + val mutProps = props.toMutableMap() + val mappingKeys = mapOf( + "broker" to "apoc.kafka.${ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG}", + "from" to "apoc.kafka.${ConsumerConfig.AUTO_OFFSET_RESET_CONFIG}", + "autoCommit" to "apoc.kafka.${ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG}", + "keyDeserializer" to "apoc.kafka.${ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG}", + "valueDeserializer" to "apoc.kafka.${ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG}", + "schemaRegistryUrl" to "apoc.kafka.schema.registry.url", + "groupId" to "apoc.kafka.${ConsumerConfig.GROUP_ID_CONFIG}") + mutProps += 
config.mapKeys { mappingKeys.getOrDefault(it.key, it.key) } + return mutProps + } + } +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/consumer/StreamsEventConsumer.kt b/extended/src/main/kotlin/apoc/kafka/consumer/StreamsEventConsumer.kt new file mode 100644 index 0000000000..cdce9d7da2 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/consumer/StreamsEventConsumer.kt @@ -0,0 +1,24 @@ +package apoc.kafka.consumer + +import org.neo4j.logging.Log +import apoc.kafka.service.StreamsSinkEntity + + +abstract class StreamsEventConsumer(log: Log, topics: Set) { + + abstract fun stop() + + abstract fun start() + + abstract fun read(topicConfig: Map = emptyMap(), action: (String, List) -> Unit) + + abstract fun read(action: (String, List) -> Unit) + + fun invalidTopics(): List = emptyList() + +} + + +abstract class StreamsEventConsumerFactory { + abstract fun createStreamsEventConsumer(config: Map, log: Log, topics: Set): StreamsEventConsumer +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/consumer/StreamsEventSink.kt b/extended/src/main/kotlin/apoc/kafka/consumer/StreamsEventSink.kt new file mode 100644 index 0000000000..ab28496299 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/consumer/StreamsEventSink.kt @@ -0,0 +1,20 @@ +package apoc.kafka.consumer + +import apoc.kafka.consumer.kafka.KafkaEventSink +import org.neo4j.kernel.internal.GraphDatabaseAPI +import org.neo4j.logging.Log + +object StreamsEventSinkFactory { + fun getStreamsEventSink(config: Map, log: Log, db: GraphDatabaseAPI): KafkaEventSink { + return KafkaEventSink(db) + } +} + +open class StreamsEventSinkConfigMapper(private val streamsConfigMap: Map, private val mappingKeys: Map) { + open fun convert(config: Map): Map { + val props = streamsConfigMap + .toMutableMap() + props += config.mapKeys { mappingKeys.getOrDefault(it.key, it.key) } + return props + } +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/consumer/StreamsEventSinkQueryExecution.kt b/extended/src/main/kotlin/apoc/kafka/consumer/StreamsEventSinkQueryExecution.kt new file mode 100644 index 0000000000..0220ae54c6 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/consumer/StreamsEventSinkQueryExecution.kt @@ -0,0 +1,32 @@ +package apoc.kafka.consumer + +import org.neo4j.kernel.internal.GraphDatabaseAPI +import org.neo4j.logging.Log +import apoc.kafka.extensions.execute +import apoc.kafka.service.StreamsSinkService +import apoc.kafka.service.StreamsStrategyStorage +import apoc.kafka.consumer.utils.ConsumerUtils + +class NotInWriteableInstanceException(message: String): RuntimeException(message) + +class StreamsEventSinkQueryExecution(private val db: GraphDatabaseAPI, + private val log: Log, + streamsStrategyStorage: StreamsStrategyStorage): + StreamsSinkService(streamsStrategyStorage) { + + override fun write(query: String, params: Collection) { + if (params.isEmpty()) return + if (ConsumerUtils.isWriteableInstance(db)) { + db.execute(query, mapOf("events" to params)) { + if (log.isDebugEnabled) { + log.debug("Query statistics:\n${it.queryStatistics}") + } + } + } else { + if (log.isDebugEnabled) { + log.debug("Not writeable instance") + } + NotInWriteableInstanceException("Not writeable instance") + } + } +} diff --git a/extended/src/main/kotlin/apoc/kafka/consumer/StreamsSinkConfigurationListener.kt b/extended/src/main/kotlin/apoc/kafka/consumer/StreamsSinkConfigurationListener.kt new file mode 100644 index 0000000000..21a82a86a4 --- /dev/null +++ 
b/extended/src/main/kotlin/apoc/kafka/consumer/StreamsSinkConfigurationListener.kt @@ -0,0 +1,29 @@ +package apoc.kafka.consumer + +import apoc.kafka.consumer.kafka.KafkaEventSink +import apoc.kafka.consumer.procedures.StreamsSinkProcedures +import org.neo4j.kernel.internal.GraphDatabaseAPI +import org.neo4j.logging.Log + +class StreamsSinkConfigurationListener(private val db: GraphDatabaseAPI, + private val log: Log) { + + var eventSink: KafkaEventSink? = null + + + fun shutdown() { + StreamsSinkProcedures.unregisterStreamsEventSink(db) + + } + + fun start(configMap: Map) { + + eventSink = StreamsEventSinkFactory + .getStreamsEventSink(configMap, + log, + db) + + StreamsSinkProcedures.registerStreamsEventSink(db, eventSink!!) + } + +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaAutoCommitEventConsumer.kt b/extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaAutoCommitEventConsumer.kt new file mode 100644 index 0000000000..a6591312b6 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaAutoCommitEventConsumer.kt @@ -0,0 +1,138 @@ +package apoc.kafka.consumer.kafka + +import org.apache.kafka.clients.consumer.ConsumerRecord +import org.apache.kafka.clients.consumer.KafkaConsumer +import org.apache.kafka.clients.consumer.OffsetAndMetadata +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.serialization.ByteArrayDeserializer +import org.neo4j.logging.Log +import apoc.kafka.consumer.StreamsEventConsumer +import apoc.kafka.extensions.offsetAndMetadata +import apoc.kafka.extensions.toStreamsSinkEntity +import apoc.kafka.service.StreamsSinkEntity +import apoc.kafka.service.errors.* +import java.time.Duration +import java.util.concurrent.atomic.AtomicBoolean + +data class KafkaTopicConfig(val commit: Boolean, val topicPartitionsMap: Map) { + companion object { + private fun toTopicPartitionMap(topicConfig: Map>>): Map = topicConfig + .flatMap { topicConfigEntry -> + topicConfigEntry.value.map { + val partition = it.getValue("partition").toString().toInt() + val offset = it.getValue("offset").toString().toLong() + TopicPartition(topicConfigEntry.key, partition) to offset + } + } + .toMap() + + fun fromMap(map: Map): KafkaTopicConfig { + val commit = map.getOrDefault("commit", true).toString().toBoolean() + val topicPartitionsMap = toTopicPartitionMap(map + .getOrDefault("partitions", emptyMap>>()) as Map>>) + return KafkaTopicConfig(commit = commit, topicPartitionsMap = topicPartitionsMap) + } + } +} + +abstract class KafkaEventConsumer(config: KafkaSinkConfiguration, + log: Log, + topics: Set): StreamsEventConsumer(log, topics) { + abstract fun wakeup() +} + +open class KafkaAutoCommitEventConsumer(private val config: KafkaSinkConfiguration, + private val log: Log, + val topics: Set, + private val dbName: String): KafkaEventConsumer(config, log, topics) { + + private val errorService: ErrorService = KafkaErrorService(config.asProperties(), + ErrorService.ErrorConfig.from(emptyMap()), + { s, e -> log.error(s,e as Throwable) }) + + private val isSeekSet = AtomicBoolean() + + val consumer: KafkaConsumer<*, *> = when { + config.keyDeserializer == ByteArrayDeserializer::class.java.name && config.valueDeserializer == ByteArrayDeserializer::class.java.name -> KafkaConsumer(config.asProperties()) + else -> throw RuntimeException("Invalid config") + } + + override fun start() { + if (topics.isEmpty()) { + log.info("No topics specified Kafka Consumer will not started") + return + } + 
this.consumer.subscribe(topics) + } + + override fun stop() { + consumer.close() + errorService.close() + } + + private fun readSimple(action: (String, List) -> Unit) { + val records = consumer.poll(Duration.ZERO) + if (records.isEmpty) return + this.topics.forEach { topic -> + val topicRecords = records.records(topic) + executeAction(action, topic, topicRecords) + } + } + + fun executeAction(action: (String, List) -> Unit, topic: String, topicRecords: Iterable>) { + try { + action(topic, topicRecords.map { it.toStreamsSinkEntity() }) + } catch (e: Exception) { + errorService.report(topicRecords.map { ErrorData.from(it, e, this::class.java, dbName) }) + } + } + + fun readFromPartition(kafkaTopicConfig: KafkaTopicConfig, + action: (String, List) -> Unit): Map { + setSeek(kafkaTopicConfig.topicPartitionsMap) + val records = consumer.poll(Duration.ZERO) + return when (records.isEmpty) { + true -> emptyMap() + else -> kafkaTopicConfig.topicPartitionsMap + .mapValues { records.records(it.key) } + .filterValues { it.isNotEmpty() } + .mapValues { (topic, topicRecords) -> + executeAction(action, topic.topic(), topicRecords) + topicRecords.last().offsetAndMetadata() + } + } + } + + override fun read(action: (String, List) -> Unit) { + readSimple(action) + } + + override fun read(topicConfig: Map, action: (String, List) -> Unit) { + val kafkaTopicConfig = KafkaTopicConfig.fromMap(topicConfig) + if (kafkaTopicConfig.topicPartitionsMap.isEmpty()) { + readSimple(action) + } else { + readFromPartition(kafkaTopicConfig, action) + } + } + + private fun setSeek(topicPartitionsMap: Map) { + if (!isSeekSet.compareAndSet(false, true)) { + return + } + consumer.poll(0) // dummy call see: https://stackoverflow.com/questions/41008610/kafkaconsumer-0-10-java-api-error-message-no-current-assignment-for-partition + topicPartitionsMap.forEach { + when (it.value) { + -1L -> consumer.seekToBeginning(listOf(it.key)) + -2L -> consumer.seekToEnd(listOf(it.key)) + else -> consumer.seek(it.key, it.value) + } + } + } + + override fun wakeup() { + consumer.wakeup() + } +} + diff --git a/extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaEventSink.kt b/extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaEventSink.kt new file mode 100644 index 0000000000..79b99099db --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaEventSink.kt @@ -0,0 +1,48 @@ +package apoc.kafka.consumer.kafka + +import apoc.kafka.consumer.StreamsEventConsumer +import apoc.kafka.consumer.StreamsEventConsumerFactory +import apoc.kafka.events.KafkaStatus +import apoc.kafka.extensions.isDefaultDb +import kotlinx.coroutines.Job +import kotlinx.coroutines.isActive +import kotlinx.coroutines.runBlocking +import kotlinx.coroutines.sync.Mutex +import kotlinx.coroutines.sync.withLock +import org.neo4j.kernel.internal.GraphDatabaseAPI +import org.neo4j.logging.Log + +class KafkaEventSink(private val db: GraphDatabaseAPI) { + + private val mutex = Mutex() + + private var job: Job? 
= null + + + fun getEventConsumerFactory(): StreamsEventConsumerFactory { + return object: StreamsEventConsumerFactory() { + override fun createStreamsEventConsumer(config: Map, log: Log, topics: Set): StreamsEventConsumer { + val dbName = db.databaseName() + val kafkaConfig = KafkaSinkConfiguration.from(config, dbName, db.isDefaultDb()) + val topics1 = topics as Set + return if (kafkaConfig.enableAutoCommit) { + KafkaAutoCommitEventConsumer(kafkaConfig, log, topics1, dbName) + } else { + KafkaManualCommitEventConsumer(kafkaConfig, log, topics1, dbName) + } + } + } + } + + fun status(): KafkaStatus = runBlocking { + mutex.withLock(job) { + status(job) + } + } + + private fun status(job: Job?): KafkaStatus = when (job?.isActive) { + true -> KafkaStatus.RUNNING + else -> KafkaStatus.STOPPED + } + +} diff --git a/extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaManualCommitEventConsumer.kt b/extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaManualCommitEventConsumer.kt new file mode 100644 index 0000000000..6871f036f8 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaManualCommitEventConsumer.kt @@ -0,0 +1,118 @@ +package apoc.kafka.consumer.kafka + +import org.apache.kafka.clients.consumer.CommitFailedException +import org.apache.kafka.clients.consumer.ConsumerRebalanceListener +import org.apache.kafka.clients.consumer.OffsetAndMetadata +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.errors.WakeupException +import org.neo4j.logging.Log +import apoc.kafka.extensions.offsetAndMetadata +import apoc.kafka.extensions.topicPartition +import apoc.kafka.service.StreamsSinkEntity +import java.time.Duration + +class KafkaManualCommitEventConsumer(config: KafkaSinkConfiguration, + private val log: Log, + topics: Set, + dbName: String): KafkaAutoCommitEventConsumer(config, log, topics, dbName) { + + private val asyncCommit = config.asyncCommit + + override fun stop() { + if (asyncCommit) { + doCommitSync() + } + super.stop() + } + + private fun doCommitSync() { + try { + /* + * While everything is fine, we use commitAsync. + * It is faster, and if one commit fails, the next commit will serve as a retry. + * But if we are closing, there is no "next commit". We call commitSync(), + * because it will retry until it succeeds or suffers unrecoverable failure. + */ + consumer.commitSync() + } catch (e: WakeupException) { + // we're shutting down, but finish the commit first and then + // rethrow the exception so that the main loop can exit + doCommitSync() + throw e + } catch (e: CommitFailedException) { + // the commit failed with an unrecoverable error. if there is any + // internal state which depended on the commit, you can clean it + // up here. otherwise it's reasonable to ignore the error and go on + log.warn("Commit failed", e) + } + } + + override fun start() { + if (asyncCommit) { + if (topics.isEmpty()) { + log.info("No topics specified Kafka Consumer will not started") + return + } + this.consumer.subscribe(topics, object : ConsumerRebalanceListener { + override fun onPartitionsRevoked(partitions: Collection) = doCommitSync() + + override fun onPartitionsAssigned(partitions: Collection) {} + }) + } else { + super.start() + } + } + + private fun commitData(commit: Boolean, topicMap: Map) { + if (commit && topicMap.isNotEmpty()) { + if (asyncCommit) { + if (log.isDebugEnabled) { + log.debug("Committing data in async") + } + consumer.commitAsync(topicMap) { offsets: MutableMap, exception: Exception? 
-> + if (exception != null) { + log.warn(""" + |These offsets `$offsets` + |cannot be committed because of the following exception: + """.trimMargin(), exception) + } + } + } else { + if (log.isDebugEnabled) { + log.debug("Committing data in sync") + } + consumer.commitSync(topicMap) + } + } + } + + override fun read(action: (String, List) -> Unit) { + val topicMap = readSimple(action) + commitData(true, topicMap) + } + + override fun read(topicConfig: Map, action: (String, List) -> Unit) { + val kafkaTopicConfig = KafkaTopicConfig.fromMap(topicConfig) + val topicMap = if (kafkaTopicConfig.topicPartitionsMap.isEmpty()) { + readSimple(action) + } else { + readFromPartition(kafkaTopicConfig, action) + } + commitData(kafkaTopicConfig.commit, topicMap) + } + + private fun readSimple(action: (String, List) -> Unit): Map { + val records = consumer.poll(Duration.ZERO) + return when (records.isEmpty) { + true -> emptyMap() + else -> records.partitions() + .map { topicPartition -> + val topicRecords = records.records(topicPartition) + executeAction(action, topicPartition.topic(), topicRecords) + val last = topicRecords.last() + last.topicPartition() to last.offsetAndMetadata() + } + .toMap() + } + } +} diff --git a/extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaSinkConfiguration.kt b/extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaSinkConfiguration.kt new file mode 100644 index 0000000000..4b5387f1d0 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/consumer/kafka/KafkaSinkConfiguration.kt @@ -0,0 +1,76 @@ +package apoc.kafka.consumer.kafka + +import org.apache.kafka.clients.CommonClientConfigs +import org.apache.kafka.clients.consumer.ConsumerConfig +import org.apache.kafka.common.serialization.ByteArrayDeserializer +import apoc.kafka.extensions.toPointCase +import apoc.kafka.utils.JSONUtils +import apoc.kafka.utils.KafkaUtil.validateConnection +import java.util.Properties + + +private const val kafkaConfigPrefix = "apoc.kafka." 
+ + +private fun validateDeserializers(config: KafkaSinkConfiguration) {} + +data class KafkaSinkConfiguration(val bootstrapServers: String = "localhost:9092", + val keyDeserializer: String = "org.apache.kafka.common.serialization.ByteArrayDeserializer", + val valueDeserializer: String = "org.apache.kafka.common.serialization.ByteArrayDeserializer", + val groupId: String = "neo4j", + val autoOffsetReset: String = "earliest", + val enableAutoCommit: Boolean = true, + val asyncCommit: Boolean = false, + val extraProperties: Map = emptyMap()) { + + companion object { + + fun from(cfg: Map, dbName: String, isDefaultDb: Boolean): KafkaSinkConfiguration { + val kafkaCfg = create(cfg, dbName, isDefaultDb) + validate(kafkaCfg) + return kafkaCfg + } + + // Visible for testing + fun create(cfg: Map, dbName: String, isDefaultDb: Boolean): KafkaSinkConfiguration { + val config = cfg + .filterKeys { it.startsWith(kafkaConfigPrefix) && !it.startsWith("${kafkaConfigPrefix}sink") } + .mapKeys { it.key.substring(kafkaConfigPrefix.length) } + val default = KafkaSinkConfiguration() + + val keys = JSONUtils.asMap(default).keys.map { it.toPointCase() } + val extraProperties = config.filterKeys { !keys.contains(it) } + + + return default.copy(keyDeserializer = config.getOrDefault(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, default.keyDeserializer), + valueDeserializer = config.getOrDefault(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, default.valueDeserializer), + bootstrapServers = config.getOrDefault(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, default.bootstrapServers), + autoOffsetReset = config.getOrDefault(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, default.autoOffsetReset), + groupId = config.getOrDefault(ConsumerConfig.GROUP_ID_CONFIG, default.groupId) + (if (isDefaultDb) "" else "-$dbName"), + enableAutoCommit = config.getOrDefault(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, default.enableAutoCommit).toString().toBoolean(), + asyncCommit = config.getOrDefault("async.commit", default.asyncCommit).toString().toBoolean(), + extraProperties = extraProperties // for what we don't provide a default configuration + ) + } + + private fun validate(config: KafkaSinkConfiguration) { + validateConnection(config.bootstrapServers, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, false) + val schemaRegistryUrlKey = "schema.registry.url" + if (config.extraProperties.containsKey(schemaRegistryUrlKey)) { + val schemaRegistryUrl = config.extraProperties.getOrDefault(schemaRegistryUrlKey, "") + validateConnection(schemaRegistryUrl, schemaRegistryUrlKey, false) + } + validateDeserializers(config) + } + } + + fun asProperties(): Properties { + val props = Properties() + val map = JSONUtils.asMap(this) + .filterKeys { it != "extraProperties" && it != "sinkConfiguration" } + .mapKeys { it.key.toPointCase() } + props.putAll(map) + props.putAll(extraProperties) + return props + } +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/consumer/procedures/StreamsSinkProcedures.kt b/extended/src/main/kotlin/apoc/kafka/consumer/procedures/StreamsSinkProcedures.kt new file mode 100644 index 0000000000..0c9cafa8c4 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/consumer/procedures/StreamsSinkProcedures.kt @@ -0,0 +1,118 @@ +package apoc.kafka.consumer.procedures + +import apoc.kafka.config.StreamsConfig +import apoc.kafka.consumer.StreamsEventConsumer +import apoc.kafka.consumer.kafka.KafkaEventSink +import apoc.kafka.utils.KafkaUtil +import apoc.kafka.utils.KafkaUtil.checkEnabled +import 
apoc.util.QueueBasedSpliterator +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.GlobalScope +import kotlinx.coroutines.launch +import kotlinx.coroutines.runBlocking +import org.neo4j.graphdb.GraphDatabaseService +import org.neo4j.kernel.internal.GraphDatabaseAPI +import org.neo4j.logging.Log +import org.neo4j.procedure.Context +import org.neo4j.procedure.Description +import org.neo4j.procedure.Mode +import org.neo4j.procedure.Name +import org.neo4j.procedure.Procedure +import org.neo4j.procedure.TerminationGuard +import java.util.concurrent.ArrayBlockingQueue +import java.util.concurrent.ConcurrentHashMap +import java.util.stream.Stream +import java.util.stream.StreamSupport + +class StreamResult(@JvmField val event: Map) +class KeyValueResult(@JvmField val name: String, @JvmField val value: Any?) + +class StreamsSinkProcedures { + + + @JvmField @Context + var log: Log? = null + + @JvmField @Context + var db: GraphDatabaseAPI? = null + + @JvmField @Context + var terminationGuard: TerminationGuard? = null + + @Procedure(mode = Mode.READ, name = "apoc.kafka.consume") + @Description("apoc.kafka.consume(topic, {timeout: , from: , groupId: , commit: , partitions:[{partition: , offset: }]}) " + + "YIELD event - Allows to consume custom topics") + fun consume(@Name("topic") topic: String?, + @Name(value = "config", defaultValue = "{}") config: Map?): Stream = runBlocking { + checkEnabled() + if (topic.isNullOrEmpty()) { + log?.info("Topic empty, no message sent") + Stream.empty() + } else { + val properties = config?.mapValues { it.value.toString() } ?: emptyMap() + + val configuration = StreamsConfig.getConfiguration(properties) + readData(topic, config ?: emptyMap(), configuration) + } + } + + private fun checkLeader(lambda: () -> Stream): Stream = if (KafkaUtil.isWriteableInstance(db as GraphDatabaseAPI)) { + lambda() + } else { + Stream.of(KeyValueResult("error", "You can use this procedure only in the LEADER or in a single instance configuration.")) + } + + private fun readData(topic: String, procedureConfig: Map, consumerConfig: Map): Stream { + val cfg = procedureConfig.mapValues { if (it.key != "partitions") it.value else mapOf(topic to it.value) } + val timeout = cfg.getOrDefault("timeout", 1000).toString().toLong() + val data = ArrayBlockingQueue(1000) + val tombstone = StreamResult(emptyMap()) + GlobalScope.launch(Dispatchers.IO) { + val consumer = createConsumer(consumerConfig, topic) + consumer.start() + try { + val start = System.currentTimeMillis() + while ((System.currentTimeMillis() - start) < timeout) { + consumer.read(cfg) { _, topicData -> + data.addAll(topicData.mapNotNull { it.value }.map { StreamResult(mapOf("data" to it)) }) + } + } + data.add(tombstone) + } catch (e: Exception) { + if (log?.isDebugEnabled!!) { + log?.error("Error while consuming data", e) + } + } finally { + consumer.stop() + } + } + if (log?.isDebugEnabled!!) 
{ + log?.debug("Data retrieved from topic $topic after $timeout milliseconds: $data") + } + + return StreamSupport.stream(QueueBasedSpliterator(data, tombstone, terminationGuard, timeout.toInt()), false) + } + + private fun createConsumer(consumerConfig: Map, topic: String): StreamsEventConsumer = runBlocking { + val copy = StreamsConfig.getConfiguration() + .filter { it.value is String } + .mapValues { it.value } + .toMutableMap() + copy.putAll(consumerConfig) + getStreamsEventSink(db!!)!!.getEventConsumerFactory() + .createStreamsEventConsumer(copy, log!!, setOf(topic)) + } + + companion object { + private val streamsEventSinkStore = ConcurrentHashMap() + + private fun getStreamsEventSink(db: GraphDatabaseService) = streamsEventSinkStore[KafkaUtil.getName(db)] + + fun registerStreamsEventSink(db: GraphDatabaseAPI, streamsEventSink: KafkaEventSink) { + streamsEventSinkStore[KafkaUtil.getName(db)] = streamsEventSink + } + + fun unregisterStreamsEventSink(db: GraphDatabaseAPI) = streamsEventSinkStore.remove(KafkaUtil.getName(db)) + + } +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/consumer/utils/ConsumerUtils.kt b/extended/src/main/kotlin/apoc/kafka/consumer/utils/ConsumerUtils.kt new file mode 100644 index 0000000000..d67bddcfb4 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/consumer/utils/ConsumerUtils.kt @@ -0,0 +1,13 @@ +package apoc.kafka.consumer.utils + +import org.neo4j.kernel.internal.GraphDatabaseAPI +import apoc.kafka.utils.KafkaUtil + +object ConsumerUtils { + + fun isWriteableInstance(db: GraphDatabaseAPI): Boolean = KafkaUtil.isWriteableInstance(db) + + fun executeInWriteableInstance(db: GraphDatabaseAPI, + action: () -> T?): T? = KafkaUtil.executeInWriteableInstance(db, action) + +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/events/KafkaStatus.kt b/extended/src/main/kotlin/apoc/kafka/events/KafkaStatus.kt new file mode 100644 index 0000000000..402ee14072 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/events/KafkaStatus.kt @@ -0,0 +1,3 @@ +package apoc.kafka.events + +enum class KafkaStatus { RUNNING, STOPPED, UNKNOWN } \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/events/StreamsEvent.kt b/extended/src/main/kotlin/apoc/kafka/events/StreamsEvent.kt new file mode 100644 index 0000000000..94067cf6c7 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/events/StreamsEvent.kt @@ -0,0 +1,70 @@ +package apoc.kafka.events + + +enum class OperationType { created, updated, deleted } + +data class Meta(val timestamp: Long, + val username: String, + val txId: Long, + val txEventId: Int, + val txEventsCount: Int, + val operation: OperationType, + val source: Map = emptyMap()) + + +enum class EntityType { node, relationship } + +data class RelationshipNodeChange(val id: String, + val labels: List?, + val ids: Map) + +abstract class RecordChange{ abstract val properties: Map? } +data class NodeChange(override val properties: Map?, + val labels: List?): RecordChange() + +data class RelationshipChange(override val properties: Map?): RecordChange() + +abstract class Payload { + abstract val id: String + abstract val type: EntityType + abstract val before: RecordChange? + abstract val after: RecordChange? 
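+    // Editorial note: Payload is the common CDC envelope carrying the entity id plus its
+    // before/after states; the concrete NodePayload and RelationshipPayload below fix
+    // `type` to node/relationship and narrow before/after to NodeChange/RelationshipChange.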
+} +data class NodePayload(override val id: String, + override val before: NodeChange?, + override val after: NodeChange?, + override val type: EntityType = EntityType.node): Payload() + +data class RelationshipPayload(override val id: String, + val start: RelationshipNodeChange, + val end: RelationshipNodeChange, + override val before: RelationshipChange?, + override val after: RelationshipChange?, + val label: String, + override val type: EntityType = EntityType.relationship): Payload() + +enum class StreamsConstraintType { UNIQUE, NODE_PROPERTY_EXISTS, RELATIONSHIP_PROPERTY_EXISTS } + +enum class RelKeyStrategy { DEFAULT, ALL } + +data class Constraint(val label: String?, + val properties: Set, + val type: StreamsConstraintType) + +data class Schema(val properties: Map = emptyMap(), + val constraints: List = emptyList()) + +open class StreamsEvent(open val payload: Any) +data class StreamsTransactionEvent(val meta: Meta, override val payload: Payload, val schema: Schema): StreamsEvent(payload) + +data class StreamsTransactionNodeEvent(val meta: Meta, + val payload: NodePayload, + val schema: Schema) { + fun toStreamsTransactionEvent() = StreamsTransactionEvent(this.meta, this.payload, this.schema) +} +data class StreamsTransactionRelationshipEvent(val meta: Meta, + val payload: RelationshipPayload, + val schema: Schema) { + fun toStreamsTransactionEvent() = StreamsTransactionEvent(this.meta, this.payload, this.schema) +} + diff --git a/extended/src/main/kotlin/apoc/kafka/extensions/CommonExtensions.kt b/extended/src/main/kotlin/apoc/kafka/extensions/CommonExtensions.kt new file mode 100644 index 0000000000..4a6f109e7d --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/extensions/CommonExtensions.kt @@ -0,0 +1,83 @@ +package apoc.kafka.extensions + +import org.apache.avro.Schema +import org.apache.avro.generic.GenericEnumSymbol +import org.apache.avro.generic.GenericFixed +import org.apache.avro.generic.GenericRecord +import org.apache.avro.generic.IndexedRecord +import org.apache.kafka.clients.consumer.ConsumerRecord +import org.apache.kafka.clients.consumer.OffsetAndMetadata +import org.apache.kafka.common.TopicPartition +import org.neo4j.graphdb.Node +import apoc.kafka.utils.JSONUtils +import apoc.kafka.service.StreamsSinkEntity +import java.nio.ByteBuffer +import java.util.* +import javax.lang.model.SourceVersion + +private fun convertData(data: Any?, stringWhenFailure: Boolean = false): Any? 
{ + return when (data) { + null -> null + is ByteArray -> JSONUtils.readValue(data, Any::class.java) + is GenericRecord -> data.toMap() + else -> if (stringWhenFailure) data.toString() else throw RuntimeException("Unsupported type ${data::class.java.name}") + } +} + +fun Map.getInt(name:String, defaultValue: Int) = this.get(name)?.toInt() ?: defaultValue +fun Map<*, *>.asProperties() = this.let { + val properties = Properties() + properties.putAll(it) + properties +} + +fun Node.labelNames() : List { + return this.labels.map { it.name() } +} + +fun String.toPointCase(): String { + return this.split("(?<=[a-z])(?=[A-Z])".toRegex()).joinToString(separator = ".").toLowerCase() +} + +fun String.quote(): String = if (SourceVersion.isIdentifier(this)) this else "`$this`" + +fun Map.flatten(map: Map = this, prefix: String = ""): Map { + return map.flatMap { + val key = it.key + val value = it.value + val newKey = if (prefix != "") "$prefix.$key" else key + if (value is Map<*, *>) { + flatten(value as Map, newKey).toList() + } else { + listOf(newKey to value) + } + }.toMap() +} + +fun ConsumerRecord<*, *>.topicPartition() = TopicPartition(this.topic(), this.partition()) +fun ConsumerRecord<*, *>.offsetAndMetadata(metadata: String = "") = OffsetAndMetadata(this.offset() + 1, metadata) + +private fun convertAvroData(rawValue: Any?): Any? = when (rawValue) { + is IndexedRecord -> rawValue.toMap() + is Collection<*> -> rawValue.map(::convertAvroData) + is Array<*> -> if (rawValue.javaClass.componentType.isPrimitive) rawValue else rawValue.map(::convertAvroData) + is Map<*, *> -> rawValue + .mapKeys { it.key.toString() } + .mapValues { convertAvroData(it.value) } + is GenericFixed -> rawValue.bytes() + is ByteBuffer -> rawValue.array() + is GenericEnumSymbol<*>, is CharSequence -> rawValue.toString() + else -> rawValue +} +fun IndexedRecord.toMap() = this.schema.fields + .map { it.name() to convertAvroData(this[it.pos()]) } + .toMap() + +fun Schema.toMap() = JSONUtils.asMap(this.toString()) + + +fun ConsumerRecord<*, *>.toStreamsSinkEntity(): StreamsSinkEntity { + val key = convertData(this.key(), true) + val value = convertData(this.value()) + return StreamsSinkEntity(key, value) +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/extensions/CoroutineExtensions.kt b/extended/src/main/kotlin/apoc/kafka/extensions/CoroutineExtensions.kt new file mode 100644 index 0000000000..e4455533f1 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/extensions/CoroutineExtensions.kt @@ -0,0 +1,44 @@ +package apoc.kafka.extensions + +import kotlinx.coroutines.Deferred +import kotlinx.coroutines.ExperimentalCoroutinesApi +import kotlinx.coroutines.ObsoleteCoroutinesApi +import kotlinx.coroutines.channels.ticker +import kotlinx.coroutines.selects.whileSelect +import java.util.concurrent.CopyOnWriteArraySet +import java.util.concurrent.TimeoutException + + +// taken from https://stackoverflow.com/questions/52192752/kotlin-how-to-run-n-coroutines-and-wait-for-first-m-results-or-timeout +@ObsoleteCoroutinesApi +@ExperimentalCoroutinesApi +suspend fun List>.awaitAll(timeoutMs: Long): List { + val jobs = CopyOnWriteArraySet>(this) + val result = ArrayList(size) + val timeout = ticker(timeoutMs) + + whileSelect { + jobs.forEach { deferred -> + deferred.onAwait { + jobs.remove(deferred) + result.add(it) + result.size != size + } + } + + timeout.onReceive { + jobs.forEach { it.cancel() } + throw TimeoutException("Tasks $size cancelled after timeout of $timeoutMs ms.") + } + } + + return result 
+} + +@ExperimentalCoroutinesApi +fun Deferred.errors() = when { + isCompleted -> getCompletionExceptionOrNull() + isCancelled -> getCompletionExceptionOrNull() // was getCancellationException() + isActive -> RuntimeException("Job $this still active") + else -> null +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/extensions/DatabaseManagementServiceExtensions.kt b/extended/src/main/kotlin/apoc/kafka/extensions/DatabaseManagementServiceExtensions.kt new file mode 100644 index 0000000000..08d7ed2688 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/extensions/DatabaseManagementServiceExtensions.kt @@ -0,0 +1,28 @@ +package apoc.kafka.extensions + +import apoc.kafka.utils.KafkaUtil +import org.neo4j.dbms.api.DatabaseManagementService +import org.neo4j.kernel.internal.GraphDatabaseAPI + +fun DatabaseManagementService.getSystemDb() = this.database(KafkaUtil.SYSTEM_DATABASE_NAME) as GraphDatabaseAPI + +fun DatabaseManagementService.getDefaultDbName() = getSystemDb().let { + try { + it.beginTx().use { + val col = it.execute("SHOW DEFAULT DATABASE").columnAs("name") + if (col.hasNext()) { + col.next() + } else { + null + } + } + } catch (e: Exception) { + null + } +} + +fun DatabaseManagementService.getDefaultDb() = getDefaultDbName()?.let { this.database(it) as GraphDatabaseAPI } + +fun DatabaseManagementService.isAvailable(timeout: Long) = this.listDatabases() + .all { this.database(it).isAvailable(timeout) } + diff --git a/extended/src/main/kotlin/apoc/kafka/extensions/GraphDatabaseServerExtensions.kt b/extended/src/main/kotlin/apoc/kafka/extensions/GraphDatabaseServerExtensions.kt new file mode 100644 index 0000000000..62aec6c725 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/extensions/GraphDatabaseServerExtensions.kt @@ -0,0 +1,32 @@ +package apoc.kafka.extensions + +import apoc.kafka.utils.KafkaUtil +import org.neo4j.common.DependencyResolver +import org.neo4j.dbms.api.DatabaseManagementService +import org.neo4j.graphdb.GraphDatabaseService +import org.neo4j.graphdb.Result +import org.neo4j.graphdb.event.TransactionEventListener +import org.neo4j.kernel.internal.GraphDatabaseAPI + +fun GraphDatabaseService.execute(cypher: String) = this.execute(cypher, emptyMap()) +fun GraphDatabaseService.execute(cypher: String, params: Map) = this.executeTransactionally(cypher, params) + +fun GraphDatabaseService.execute(cypher: String, lambda: ((Result) -> T)) = this.execute(cypher, emptyMap(), lambda) +fun GraphDatabaseService.execute(cypher: String, + params: Map, + lambda: ((Result) -> T)) = this.executeTransactionally(cypher, params, lambda) + +fun GraphDatabaseService.isSystemDb() = this.databaseName() == KafkaUtil.SYSTEM_DATABASE_NAME + +fun GraphDatabaseService.databaseManagementService() = (this as GraphDatabaseAPI).dependencyResolver + .resolveDependency(DatabaseManagementService::class.java, DependencyResolver.SelectionStrategy.SINGLE) + +fun GraphDatabaseService.isDefaultDb() = databaseManagementService().getDefaultDbName() == databaseName() + +fun GraphDatabaseService.registerTransactionEventListener(txHandler: TransactionEventListener<*>) { + databaseManagementService().registerTransactionEventListener(this.databaseName(), txHandler) +} + +fun GraphDatabaseService.unregisterTransactionEventListener(txHandler: TransactionEventListener<*>) { + databaseManagementService().unregisterTransactionEventListener(this.databaseName(), txHandler) +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/producer/Extensions.kt 
b/extended/src/main/kotlin/apoc/kafka/producer/Extensions.kt new file mode 100644 index 0000000000..6286bc39f3 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/producer/Extensions.kt @@ -0,0 +1,82 @@ +package apoc.kafka.producer + +import apoc.kafka.events.EntityType +import apoc.kafka.events.NodeChange +import apoc.kafka.events.NodePayload +import apoc.kafka.events.OperationType +import apoc.kafka.events.RelationshipNodeChange +import apoc.kafka.events.RelationshipPayload +import apoc.kafka.events.Schema +import apoc.kafka.events.StreamsConstraintType +import apoc.kafka.events.StreamsTransactionEvent +import apoc.kafka.extensions.labelNames +import apoc.kafka.utils.KafkaUtil.getNodeKeys +import org.apache.kafka.clients.producer.RecordMetadata +import org.apache.kafka.common.config.TopicConfig +import org.neo4j.graphdb.Node +import org.neo4j.graphdb.Relationship +import org.neo4j.graphdb.schema.ConstraintDefinition +import org.neo4j.graphdb.schema.ConstraintType + +fun Node.toMap(): Map { + return mapOf("id" to id.toString(), "properties" to allProperties, "labels" to labelNames(), "type" to EntityType.node) +} + +fun Relationship.toMap(): Map { + return mapOf("id" to id.toString(), "properties" to allProperties, "label" to type.name(), + "start" to startNode.toMap(), + "end" to endNode.toMap(), + "type" to EntityType.relationship) +} + +fun RecordMetadata.toMap(): Map = mapOf( + "offset" to offset(), + "timestamp" to timestamp(), + "keySize" to serializedKeySize(), + "valueSize" to serializedValueSize(), + "partition" to partition() +) + +fun ConstraintDefinition.streamsConstraintType(): StreamsConstraintType { + return when (this.constraintType) { + ConstraintType.UNIQUENESS, ConstraintType.NODE_KEY -> StreamsConstraintType.UNIQUE + else -> if (isNodeConstraint()) StreamsConstraintType.NODE_PROPERTY_EXISTS else StreamsConstraintType.RELATIONSHIP_PROPERTY_EXISTS + } +} + +fun ConstraintDefinition.isNodeConstraint(): Boolean { + return try { this.label; true } catch (e: IllegalStateException) { false } +} + +fun ConstraintDefinition.isRelationshipConstraint(): Boolean { + return try { this.relationshipType; true } catch (e: IllegalStateException) { false } +} + +fun StreamsTransactionEvent.asSourceRecordValue(strategy: String): StreamsTransactionEvent? = + if(isStrategyCompact(strategy) && meta.operation == OperationType.deleted) null else this + +fun StreamsTransactionEvent.asSourceRecordKey(strategy: String): Any = + when { + isStrategyCompact(strategy) && payload is NodePayload -> nodePayloadAsMessageKey(payload as NodePayload, schema) + isStrategyCompact(strategy) && payload is RelationshipPayload -> relationshipAsMessageKey(payload as RelationshipPayload) + else -> "${meta.txId + meta.txEventId}-${meta.txEventId}" + } + +private fun nodePayloadAsMessageKey(payload: NodePayload, schema: Schema) = run { + val nodeChange: NodeChange = payload.after ?: payload.before!! 
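+    // Editorial note: for compacted topics the record key prefers the properties backed by
+    // constraints (getNodeKeys); when none of those properties are present on the node, the
+    // key falls back to the internal node id, as the ids.isEmpty() check below shows.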
+ val labels = nodeChange.labels ?: emptyList() + val props: Map = nodeChange.properties ?: emptyMap() + val keys = getNodeKeys(labels, props.keys, schema.constraints) + val ids = props.filterKeys { keys.contains(it) } + + if (ids.isEmpty()) payload.id else mapOf("ids" to ids, "labels" to labels) +} + +private fun RelationshipNodeChange.toKey(): Any = if (ids.isEmpty()) id else mapOf("ids" to ids, "labels" to labels) + +private fun relationshipAsMessageKey(payload: RelationshipPayload) = mapOf( + "start" to payload.start.toKey(), + "end" to payload.end.toKey(), + "label" to payload.label) + +private fun isStrategyCompact(strategy: String) = strategy == TopicConfig.CLEANUP_POLICY_COMPACT \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/producer/RoutingConfiguration.kt b/extended/src/main/kotlin/apoc/kafka/producer/RoutingConfiguration.kt new file mode 100644 index 0000000000..54465a75ec --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/producer/RoutingConfiguration.kt @@ -0,0 +1,251 @@ +package apoc.kafka.producer + +import org.apache.commons.lang3.StringUtils +import org.apache.kafka.common.internals.Topic +import org.neo4j.graphdb.Entity +import org.neo4j.graphdb.Node +import org.neo4j.graphdb.Relationship +import org.neo4j.logging.Log +import apoc.kafka.events.* + + +private val PATTERN_REG: Regex = "^(\\s*\\:*\\s*\\`*\\s*\\w+\\s*(?:\\:*\\s*\\`*\\s*\\:?(?:[\\w\\`|\\*]+)\\s*)*\\`*\\:?)\\s*(?:\\{\\s*(-?[\\w|\\*]+\\s*(?:,\\s*-?[\\w|\\*]+\\s*)*)\\})?\$".toRegex() +private val PATTERN_COLON_REG = "\\s*:\\s*(?=(?:[^\\`]*\\`[^\\`]*\\`)*[^\\`]*\$)".toRegex() +private val PATTERN_COMMA = "\\s*,\\s*".toRegex() +private const val PATTERN_WILDCARD = "*" +private const val PATTERN_PROP_MINUS = '-' +private const val PATTERN_SPLIT = ";" +private const val BACKTICK_CHAR = "`" + +data class RoutingProperties(val all: Boolean, + val include: List, + val exclude: List) { + companion object { + fun from(matcher: MatchResult): RoutingProperties { + val props = matcher.groupValues[2].trim().let { if (it.isEmpty()) emptyList() else it.trim().split( + PATTERN_COMMA + ) } + val include = if (props.isEmpty()) { + emptyList() + } else { + props.filter { it != PATTERN_WILDCARD && !it.startsWith(PATTERN_PROP_MINUS) } + } + val exclude = if (props.isEmpty()) { + emptyList() + } else { + props.filter { it != PATTERN_WILDCARD && it.startsWith(PATTERN_PROP_MINUS) }.map { it.substring(1) } + } + val all = props.isEmpty() || props.contains(PATTERN_WILDCARD) + return RoutingProperties(all = all, include = include, exclude = exclude) + } + } +} + +abstract class RoutingConfiguration { + abstract val topic: String + abstract val all: Boolean + abstract val include: List + abstract val exclude: List + abstract fun filter(entity: Entity): Map +} + +private fun hasLabel(label: String, streamsTransactionEvent: StreamsTransactionEvent): Boolean { + if (streamsTransactionEvent.payload.type == EntityType.relationship) { + return false + } + val payload = when(streamsTransactionEvent.meta.operation) { + OperationType.deleted -> streamsTransactionEvent.payload.before as NodeChange + else -> streamsTransactionEvent.payload.after as NodeChange + } + return payload.labels.orEmpty().contains(label) +} + +private fun isRelationshipType(name: String, streamsTransactionEvent: StreamsTransactionEvent): Boolean { + if (streamsTransactionEvent.payload.type == EntityType.node) { + return false + } + val relationshipChange = streamsTransactionEvent.payload as RelationshipPayload + return 
relationshipChange.label == name +} + +private fun filterProperties(properties: Map?, routingConfiguration: RoutingConfiguration): Map? { + if (properties == null) { + return null + } + if (!routingConfiguration.all) { + if (routingConfiguration.include.isNotEmpty()) { + return properties!!.filter { prop -> routingConfiguration.include.contains(prop.key) } + } + if (routingConfiguration.exclude.isNotEmpty()) { + return properties!!.filter { prop -> !routingConfiguration.exclude.contains(prop.key) } + } + + } + return properties +} + +data class NodeRoutingConfiguration(val labels: List = emptyList(), + override val topic: String = "neo4j", + override val all: Boolean = true, + override val include: List = emptyList(), + override val exclude: List = emptyList()): RoutingConfiguration() { + + override fun filter(node: Entity): Map { + if (node !is Node) { + throw IllegalArgumentException("argument must be and instance of ${Node::class.java.name}") + } + val properties = filterProperties(node.allProperties, this) + val map = node.toMap().toMutableMap() + map["properties"] = properties + return map + } + + companion object { + fun parse(topic: String, pattern: String): List { + Topic.validate(topic) + if (pattern == PATTERN_WILDCARD) { + return listOf(NodeRoutingConfiguration(topic = topic)) + } + return pattern.split(PATTERN_SPLIT).map { + val matcher = PATTERN_REG.matchEntire(it) + if (matcher == null) { + throw IllegalArgumentException("The pattern $pattern for topic $topic is invalid") + } else { + val labels = matcher.groupValues[1].trim().split(PATTERN_COLON_REG).map { it.replace(BACKTICK_CHAR, StringUtils.EMPTY) }.filter{ it.isNotBlank() } + val properties = RoutingProperties.from(matcher) + NodeRoutingConfiguration(labels = labels, topic = topic, all = properties.all, + include = properties.include, exclude = properties.exclude) + } + } + } + + fun prepareEvent(streamsTransactionEvent: StreamsTransactionEvent, routingConf: List): Map { + return routingConf + .filter { + it.labels.isEmpty() || it.labels.any { hasLabel(it, streamsTransactionEvent) } + } + .map { + val nodePayload = streamsTransactionEvent.payload as NodePayload + val newRecordBefore = if (nodePayload.before != null) { + val recordBefore = nodePayload.before as NodeChange + recordBefore.copy(properties = filterProperties(streamsTransactionEvent.payload.before?.properties, it), + labels = recordBefore.labels) + } else { + null + } + val newRecordAfter = if (nodePayload.after != null) { + val recordAfter = nodePayload.after as NodeChange + recordAfter.copy(properties = filterProperties(streamsTransactionEvent.payload.after?.properties, it), + labels = recordAfter.labels) + } else { + null + } + + val newNodePayload = nodePayload.copy(id = nodePayload.id, + before = newRecordBefore, + after = newRecordAfter) + + val newStreamsEvent = streamsTransactionEvent.copy(schema = streamsTransactionEvent.schema, + meta = streamsTransactionEvent.meta, + payload = newNodePayload) + + it.topic to newStreamsEvent + } + .associateBy({ it.first }, { it.second }) + } + } +} + +data class RelationshipRoutingConfiguration(val name: String = "", + val relKeyStrategy: RelKeyStrategy = RelKeyStrategy.DEFAULT, + override val topic: String = "neo4j", + override val all: Boolean = true, + override val include: List = emptyList(), + override val exclude: List = emptyList()): RoutingConfiguration() { + + override fun filter(relationship: Entity): Map { + if (relationship !is Relationship) { + throw IllegalArgumentException("argument must be and 
instance of ${Relationship::class.java.name}") + } + val properties = filterProperties(relationship.allProperties, this) + val map = relationship.toMap().toMutableMap() + map["properties"] = properties + return map + } + + companion object { + fun parse(topic: String, pattern: String, keyStrategyString: String = RelKeyStrategy.DEFAULT.toString(), log: Log? = null): List { + Topic.validate(topic) + if (pattern == PATTERN_WILDCARD) { + return listOf(RelationshipRoutingConfiguration(topic = topic)) + } + return pattern.split(PATTERN_SPLIT).map { + val matcher = PATTERN_REG.matchEntire(it) + if (matcher == null) { + throw IllegalArgumentException("The pattern $pattern for topic $topic is invalid") + } else { + val labels = matcher.groupValues[1].split(PATTERN_COLON_REG) + if (labels.size > 1) { + throw IllegalArgumentException("The pattern $pattern for topic $topic is invalid") + } + val properties = RoutingProperties.from(matcher) + + val relKeyStrategy = try { + RelKeyStrategy.valueOf(keyStrategyString.toUpperCase()) + } catch (e: IllegalArgumentException) { + log?.warn("Invalid key strategy setting, switching to default value ${RelKeyStrategy.DEFAULT.toString().toLowerCase()}") + RelKeyStrategy.DEFAULT + } + + RelationshipRoutingConfiguration(name = labels.first().trim().replace(BACKTICK_CHAR, StringUtils.EMPTY), + topic = topic, all = properties.all, + include = properties.include, exclude = properties.exclude, relKeyStrategy = relKeyStrategy) + } + } + } + + fun prepareEvent(streamsTransactionEvent: StreamsTransactionEvent, routingConf: List): Map { + return routingConf + .filter { + it.name.isNullOrBlank() || isRelationshipType(it.name, streamsTransactionEvent) + } + .map { + val relationshipPayload = streamsTransactionEvent.payload as RelationshipPayload + + val newRecordBefore = if (relationshipPayload.before != null) { + val recordBefore = relationshipPayload.before as RelationshipChange + recordBefore.copy(properties = filterProperties(streamsTransactionEvent.payload.before?.properties, it)) + } else { + null + } + val newRecordAfter = if (relationshipPayload.after != null) { + val recordAfter = relationshipPayload.after as RelationshipChange + recordAfter.copy(properties = filterProperties(streamsTransactionEvent.payload.after?.properties, it)) + } else { + null + } + + val newRelationshipPayload = relationshipPayload.copy(id = relationshipPayload.id, + before = newRecordBefore, + after = newRecordAfter, + label = relationshipPayload.label) + + val newStreamsEvent = streamsTransactionEvent.copy(schema = streamsTransactionEvent.schema, + meta = streamsTransactionEvent.meta, + payload = newRelationshipPayload) + + it.topic to newStreamsEvent + } + .associateBy({ it.first }, { it.second }) + } + } +} + +object RoutingConfigurationFactory { + fun getRoutingConfiguration(topic: String, line: String, entityType: EntityType, keyStrategy: String = RelKeyStrategy.DEFAULT.toString(), log: Log? 
= null): List { + return when (entityType) { + EntityType.node -> NodeRoutingConfiguration.parse(topic, line) + EntityType.relationship -> RelationshipRoutingConfiguration.parse(topic, line, keyStrategy, log) + } + } +} diff --git a/extended/src/main/kotlin/apoc/kafka/producer/StreamsEventRouterConfiguration.kt b/extended/src/main/kotlin/apoc/kafka/producer/StreamsEventRouterConfiguration.kt new file mode 100644 index 0000000000..292361ea9b --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/producer/StreamsEventRouterConfiguration.kt @@ -0,0 +1,99 @@ +package apoc.kafka.producer + +import org.apache.commons.lang3.StringUtils +import org.neo4j.logging.Log +import apoc.kafka.config.StreamsConfig +import apoc.kafka.events.EntityType +import apoc.kafka.events.RelKeyStrategy + + +private inline fun filterMap(config: Map, routingPrefix: String, dbName: String = "", routingSuffix: String? = null, log: Log? = null): List { + val entityType = when (T::class) { + NodeRoutingConfiguration::class -> EntityType.node + RelationshipRoutingConfiguration::class -> EntityType.relationship + else -> throw IllegalArgumentException("The class must be an instance of RoutingConfiguration") + } + return config + .filterKeys { + val startWithPrefixAndNotEndWithSuffix = it.startsWith(routingPrefix) && routingSuffix?.let { suffix -> !it.endsWith(suffix) } ?: true + if (it.contains(StreamsRoutingConfigurationConstants.FROM)) { + val topicDbName = it.replace(routingPrefix, StringUtils.EMPTY) + .split(StreamsRoutingConfigurationConstants.FROM)[1] + startWithPrefixAndNotEndWithSuffix && topicDbName == dbName // for `from.` we compare the routing prefix and the db name + } else { + // for the default db we only filter by routingPrefix + dbName == "" && startWithPrefixAndNotEndWithSuffix + } + } + .flatMap { + val prefixAndTopic = it.key.split(StreamsRoutingConfigurationConstants.FROM)[0] + + val keyStrategy = routingSuffix?.let { suffix -> + print("suffix - $suffix") + config.entries.firstOrNull{ it.key.startsWith(prefixAndTopic) && it.key.endsWith(suffix) }?.value + } ?: RelKeyStrategy.DEFAULT.toString().toLowerCase() + + RoutingConfigurationFactory + .getRoutingConfiguration(prefixAndTopic.replace(routingPrefix, StringUtils.EMPTY), + it.value, entityType, keyStrategy, log) as List + } +} + +private object StreamsRoutingConfigurationConstants { + const val NODE_ROUTING_KEY_PREFIX: String = "apoc.kafka.source.topic.nodes." + const val REL_ROUTING_KEY_PREFIX: String = "apoc.kafka.source.topic.relationships." + const val SCHEMA_POLLING_INTERVAL = "apoc.kafka.source.schema.polling.interval" + const val FROM = ".from." + const val KEY_STRATEGY_SUFFIX = ".key_strategy" +} + +data class StreamsEventRouterConfiguration(val enabled: Boolean = StreamsConfig.SOURCE_ENABLED_VALUE, + val proceduresEnabled: Boolean = StreamsConfig.PROCEDURES_ENABLED_VALUE, + val nodeRouting: List = listOf( + NodeRoutingConfiguration() + ), + val relRouting: List = listOf( + RelationshipRoutingConfiguration() + ), + val schemaPollingInterval: Long = 300000) { + + fun allTopics(): List { + val nodeTopics = nodeRouting.map { it.topic } + val relTopics = relRouting.map { it.topic } + return nodeTopics + relTopics + } + + companion object { + + fun from(streamsConfig: Map, dbName: String, isDefaultDb: Boolean, log: Log? 
= null): StreamsEventRouterConfiguration { + var nodeRouting = filterMap(config = streamsConfig, + routingPrefix = StreamsRoutingConfigurationConstants.NODE_ROUTING_KEY_PREFIX, + dbName = dbName) + var relRouting = filterMap(config = streamsConfig, + routingPrefix = StreamsRoutingConfigurationConstants.REL_ROUTING_KEY_PREFIX, + dbName = dbName, + routingSuffix = StreamsRoutingConfigurationConstants.KEY_STRATEGY_SUFFIX, + log = log) + + if (isDefaultDb) { + nodeRouting += filterMap(config = streamsConfig, + routingPrefix = StreamsRoutingConfigurationConstants.NODE_ROUTING_KEY_PREFIX + ) + relRouting += filterMap(config = streamsConfig, + routingPrefix = StreamsRoutingConfigurationConstants.REL_ROUTING_KEY_PREFIX, + routingSuffix = StreamsRoutingConfigurationConstants.KEY_STRATEGY_SUFFIX, + log = log) + } + + val default = StreamsEventRouterConfiguration() + return default.copy( + enabled = StreamsConfig.isSourceEnabled(streamsConfig, dbName), + proceduresEnabled = StreamsConfig.hasProceduresEnabled(streamsConfig, dbName), + nodeRouting = if (nodeRouting.isEmpty()) listOf(NodeRoutingConfiguration(topic = dbName)) else nodeRouting, + relRouting = if (relRouting.isEmpty()) listOf(RelationshipRoutingConfiguration(topic = dbName)) else relRouting, + schemaPollingInterval = streamsConfig.getOrDefault(StreamsRoutingConfigurationConstants.SCHEMA_POLLING_INTERVAL, default.schemaPollingInterval).toString().toLong() + ) + } + + } +} diff --git a/extended/src/main/kotlin/apoc/kafka/producer/StreamsRouterConfigurationListener.kt b/extended/src/main/kotlin/apoc/kafka/producer/StreamsRouterConfigurationListener.kt new file mode 100644 index 0000000000..57cd1291e9 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/producer/StreamsRouterConfigurationListener.kt @@ -0,0 +1,38 @@ +package apoc.kafka.producer + +import apoc.kafka.PublishProcedures +import kotlinx.coroutines.sync.Mutex +import org.neo4j.kernel.internal.GraphDatabaseAPI +import org.neo4j.logging.Log +import apoc.kafka.extensions.isDefaultDb +import apoc.kafka.producer.kafka.KafkaConfiguration +import apoc.kafka.producer.kafka.KafkaEventRouter +import apoc.kafka.utils.KafkaUtil.getConsumerProperties + +class StreamsRouterConfigurationListener(private val db: GraphDatabaseAPI, + private val log: Log) { + + private var streamsEventRouter: KafkaEventRouter? = null + private var streamsEventRouterConfiguration: StreamsEventRouterConfiguration? = null + + private var lastConfig: KafkaConfiguration? = null + + fun shutdown() { + if (streamsEventRouterConfiguration?.enabled == true) { + streamsEventRouter?.stop() + streamsEventRouter = null + PublishProcedures.unregister(db) + } + } + + fun start(configMap: Map) { + lastConfig = KafkaConfiguration.create(configMap) + streamsEventRouterConfiguration = StreamsEventRouterConfiguration.from(configMap, db.databaseName(), isDefaultDb = db.isDefaultDb(), log) + streamsEventRouter = KafkaEventRouter(configMap, db, log) + if (streamsEventRouterConfiguration?.enabled == true || streamsEventRouterConfiguration?.proceduresEnabled == true) { + streamsEventRouter!!.start() + } + PublishProcedures.register(db, streamsEventRouter!!) 
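+        // Editorial note: the router itself is started only when the source module or the
+        // procedures are enabled, but the publish procedures are registered unconditionally in
+        // start() so apoc.kafka.publish can resolve the per-database event router.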
+ log.info("[Source] Streams Source module initialised") + } +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/producer/events/StreamsEventBuilder.kt b/extended/src/main/kotlin/apoc/kafka/producer/events/StreamsEventBuilder.kt new file mode 100644 index 0000000000..6c2d7c03dd --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/producer/events/StreamsEventBuilder.kt @@ -0,0 +1,298 @@ +package apoc.kafka.producer.events + +import org.neo4j.graphdb.Node +import org.neo4j.graphdb.Path +import org.neo4j.graphdb.Relationship +import apoc.kafka.producer.NodeRoutingConfiguration +import apoc.kafka.producer.RelationshipRoutingConfiguration +import apoc.kafka.events.* +import apoc.kafka.producer.toMap + + +class StreamsEventMetaBuilder { + + private var timestamp: Long? = null + private var username: String? = null + private var txId: Long? = null + private var txEventId: Int? = null + private var txEventsCount: Int? = null + private var operation: OperationType? = null + private var source: MutableMap = mutableMapOf() + + fun withTimestamp(timestamp : Long) : StreamsEventMetaBuilder { + this.timestamp = timestamp + return this + } + + fun withUsername(username : String) : StreamsEventMetaBuilder { + this.username = username + return this + } + + fun withTransactionId(txId : Long) : StreamsEventMetaBuilder { + this.txId = txId + return this + } + + fun withTransactionEventId(txEventId : Int) : StreamsEventMetaBuilder { + this.txEventId = txEventId + return this + } + + fun withTransactionEventsCount(txEventsCount : Int) : StreamsEventMetaBuilder { + this.txEventsCount = txEventsCount + return this + } + + fun withOperation(op : OperationType) : StreamsEventMetaBuilder { + this.operation = op + return this + } + + fun withSource(key : String, value : Any) : StreamsEventMetaBuilder { + this.source.put(key, value) + return this + } + + fun withHostname(host : String) : StreamsEventMetaBuilder { + this.source.put("hostname", host) + return this + } + + fun build() : Meta { + return Meta(timestamp!!, username!!, txId!!, txEventId!!, txEventsCount!!, operation!!, source) + } + +} + +class NodeChangeBuilder { + + private var labels : List = listOf() + private var properties : Map = mapOf() + + fun withLabels(labels : List) : NodeChangeBuilder { + this.labels = labels + return this + } + + fun withProperties(properties : Map) : NodeChangeBuilder { + this.properties = properties + return this + } + + fun build() : NodeChange { + return NodeChange(properties = properties, labels = labels) + } +} + +class NodePayloadBuilder { + + private var id : String = "0" + private var after : NodeChange? = null + private var before : NodeChange? = null + + fun withId(id : String) : NodePayloadBuilder { + this.id = id + return this + } + + fun withBefore(before : NodeChange) : NodePayloadBuilder { + this.before = before + return this + } + + fun withAfter(after : NodeChange) : NodePayloadBuilder { + this.after = after + return this + } + + fun build() : NodePayload { + return NodePayload(id, before, after) + } +} + +class RelationshipChangeBuilder { + + private var properties : Map = mapOf() + + fun withProperties(properties : Map) : RelationshipChangeBuilder { + this.properties = properties + return this + } + + fun build() : RelationshipChange { + return RelationshipChange(properties= properties) + } +} + +class RelationshipPayloadBuilder { + private var id: String = "0" + private var after: RelationshipChange? = null + private var before: RelationshipChange? 
= null + private var name: String? = null + private var startNode : RelationshipNodeChange? = null + private var endNode : RelationshipNodeChange? = null + + fun withStartNode(id: String, labels: List, ids: Map): RelationshipPayloadBuilder { + this.startNode = RelationshipNodeChange(id, labels, ids) + return this + } + + fun withEndNode(id: String, labels: List, ids: Map): RelationshipPayloadBuilder { + this.endNode = RelationshipNodeChange(id, labels, ids) + return this + } + + fun withId(id: String): RelationshipPayloadBuilder { + this.id = id + return this + } + + fun withBefore(before: RelationshipChange): RelationshipPayloadBuilder { + this.before = before + return this + } + + fun withAfter(after: RelationshipChange): RelationshipPayloadBuilder { + this.after = after + return this + } + + fun withName(name: String): RelationshipPayloadBuilder { + this.name = name + return this + } + + fun build(): RelationshipPayload { + return RelationshipPayload(id = id, before = before, after = after, label = name!!, start = startNode!!, end = endNode!! ) + } +} + +class SchemaBuilder { + + private lateinit var payload: Payload + private lateinit var constraints: Set + + fun withPayload(payload: Payload): SchemaBuilder { + this.payload = payload + return this + } + + fun withConstraints(constraints: Set): SchemaBuilder { + this.constraints = constraints + return this + } + + private fun mapPropertiesToTypes(properties: RecordChange?): Map { + return properties?.properties + ?.mapValues { + val clazz = it.value::class + if (clazz.java.isArray) { + "${it.value::class.java.componentType.simpleName}[]" + } else { + it.value::class.java.simpleName + } + } + .orEmpty() + } + + fun build(): Schema { + return Schema(mapPropertiesToTypes(payload.after ?: payload.before), constraints.toList()) + } +} + +class StreamsTransactionEventBuilder { + + private var meta: Meta? = null + private var payload: Payload? = null + private var schema: Schema? = null + + fun withMeta(meta : Meta): StreamsTransactionEventBuilder { + this.meta = meta + return this + } + + fun withPayload(payload : Payload): StreamsTransactionEventBuilder { + this.payload = payload + return this + } + + fun withSchema(schema : Schema): StreamsTransactionEventBuilder { + this.schema = schema + return this + } + + fun build(): StreamsTransactionEvent { + return StreamsTransactionEvent(meta!!, payload!!, schema!!) + } +} + +class StreamsEventBuilder { + + private lateinit var payload: Any + private lateinit var topic: String + private var nodeRoutingConfiguration: NodeRoutingConfiguration? = null + private var relationshipRoutingConfiguration: RelationshipRoutingConfiguration? = null + + fun withPayload(payload: Any): StreamsEventBuilder { + this.payload = payload + return this + } + + fun withTopic(topic: String): StreamsEventBuilder { + this.topic = topic + return this + } + + fun withNodeRoutingConfiguration(nodeRoutingConfiguration: NodeRoutingConfiguration?): StreamsEventBuilder { + this.nodeRoutingConfiguration = nodeRoutingConfiguration + return this + } + + fun withRelationshipRoutingConfiguration(relationshipRoutingConfiguration: RelationshipRoutingConfiguration?): StreamsEventBuilder { + this.relationshipRoutingConfiguration = relationshipRoutingConfiguration + return this + } + + private fun buildPayload(topic: String, payload: Any?): Any? 
{ + if (payload == null) { + return null + } + return when (payload) { + is Node -> { + if (nodeRoutingConfiguration != null) { + nodeRoutingConfiguration!!.filter(payload) + } else { + payload.toMap() + } + } + is Relationship -> { + if (relationshipRoutingConfiguration != null) { + relationshipRoutingConfiguration!!.filter(payload) + } else { + payload.toMap() + } + } + is Path -> { + val length = payload.length() + val rels = payload.relationships().map { buildPayload(topic, it) } + val nodes = payload.nodes().map { buildPayload(topic, it) } + mapOf("length" to length, "rels" to rels, "nodes" to nodes) + } + is Map<*, *> -> { + payload.mapValues { buildPayload(topic, it.value) } + } + is List<*> -> { + payload.map { buildPayload(topic, it) } + } + else -> { + payload + } + } + } + + fun build(): StreamsEvent { + return StreamsEvent(buildPayload(topic, payload)!!) + } + +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/producer/kafka/KafkaAdminService.kt b/extended/src/main/kotlin/apoc/kafka/producer/kafka/KafkaAdminService.kt new file mode 100644 index 0000000000..63e0188e25 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/producer/kafka/KafkaAdminService.kt @@ -0,0 +1,57 @@ +package apoc.kafka.producer.kafka + +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.GlobalScope +import kotlinx.coroutines.Job +import kotlinx.coroutines.cancelAndJoin +import kotlinx.coroutines.delay +import kotlinx.coroutines.isActive +import kotlinx.coroutines.launch +import kotlinx.coroutines.runBlocking +import org.apache.kafka.clients.admin.AdminClient +import org.neo4j.logging.Log +import apoc.kafka.utils.KafkaUtil.isAutoCreateTopicsEnabled +import apoc.kafka.utils.KafkaUtil.getInvalidTopics +import apoc.kafka.utils.KafkaUtil +import java.util.Collections +import java.util.concurrent.ConcurrentHashMap + +class KafkaAdminService(private val props: KafkaConfiguration, /*private val allTopics: List, */private val log: Log) { + private val client = AdminClient.create(props.asProperties()) + private val kafkaTopics: MutableSet = Collections.newSetFromMap(ConcurrentHashMap()) + private val isAutoCreateTopicsEnabled = isAutoCreateTopicsEnabled(client) + private lateinit var job: Job + + fun start() { + if (!isAutoCreateTopicsEnabled) { + job = GlobalScope.launch(Dispatchers.IO) { + while (isActive) { + try { + kafkaTopics += client.listTopics().names().get() + } catch (e: Exception) { + log.warn("""Cannot retrieve valid topics because the following exception, + |next attempt is in ${props.topicDiscoveryPollingInterval} ms: + """.trimMargin(), e) + } + delay(props.topicDiscoveryPollingInterval) + } + client.close() + } + } + } + + fun stop() { + KafkaUtil.ignoreExceptions({ + runBlocking { + job.cancelAndJoin() + } + }, UninitializedPropertyAccessException::class.java) + } + + fun isValidTopic(topic: String) = when (isAutoCreateTopicsEnabled) { + true -> true + else -> kafkaTopics.contains(topic) + } + +// fun getInvalidTopics() = getInvalidTopics(client, allTopics) +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/producer/kafka/KafkaConfiguration.kt b/extended/src/main/kotlin/apoc/kafka/producer/kafka/KafkaConfiguration.kt new file mode 100644 index 0000000000..0e3eed5342 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/producer/kafka/KafkaConfiguration.kt @@ -0,0 +1,105 @@ +package apoc.kafka.producer.kafka + +import org.apache.commons.lang3.StringUtils +import org.apache.kafka.clients.CommonClientConfigs +import 
org.apache.kafka.clients.producer.ProducerConfig +import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.serialization.ByteArraySerializer +import org.neo4j.logging.Log +import apoc.kafka.extensions.getInt +import apoc.kafka.extensions.toPointCase +import apoc.kafka.utils.JSONUtils +import apoc.kafka.utils.KafkaUtil.validateConnection +import java.util.Properties +import java.util.concurrent.TimeUnit + +enum class LogStrategy { delete, compact } + +private val configPrefix = "apoc.kafka." + +data class KafkaConfiguration(val bootstrapServers: String = "localhost:9092", + val acks: String = "1", + val retries: Int = 2, + val batchSize: Int = 16384, + val bufferMemory: Int = 33554432, + val reindexBatchSize: Int = 1000, + val sessionTimeoutMs: Int = 15 * 1000, + val connectionTimeoutMs: Int = 10 * 1000, + val replication: Int = 1, + val transactionalId: String = StringUtils.EMPTY, + val lingerMs: Int = 1, + val topicDiscoveryPollingInterval: Long = TimeUnit.MINUTES.toMillis(5), + val logCompactionStrategy: String = LogStrategy.delete.toString(), + val extraProperties: Map = emptyMap()) { + + companion object { + // Visible for testing + fun create(cfg: Map): KafkaConfiguration { + val config = cfg.filterKeys { it.startsWith(configPrefix) }.mapKeys { it.key.substring(configPrefix.length) } + + val default = KafkaConfiguration() + + val keys = JSONUtils.asMap(default).keys.map { it.toPointCase() } + val extraProperties = config.filterKeys { !keys.contains(it) } + + return default.copy(bootstrapServers = config.getOrDefault("bootstrap.servers", default.bootstrapServers), + acks = config.getOrDefault("acks", default.acks), + retries = config.getInt("retries", default.retries), + batchSize = config.getInt("batch.size", default.batchSize), + bufferMemory = config.getInt("buffer.memory", default.bufferMemory), + reindexBatchSize = config.getInt("reindex.batch.size", default.reindexBatchSize), + sessionTimeoutMs = config.getInt("session.timeout.ms", default.sessionTimeoutMs), + connectionTimeoutMs = config.getInt("connection.timeout.ms", default.connectionTimeoutMs), + replication = config.getInt("replication", default.replication), + transactionalId = config.getOrDefault("transactional.id", default.transactionalId), + lingerMs = config.getInt("linger.ms", default.lingerMs), + topicDiscoveryPollingInterval = config.getOrDefault("topic.discovery.polling.interval", + default.topicDiscoveryPollingInterval).toString().toLong(), + logCompactionStrategy = config.getOrDefault("log.compaction.strategy", default.logCompactionStrategy), + extraProperties = extraProperties // for what we don't provide a default configuration + ) + } + + fun from(cfg: Map, log: Log): KafkaConfiguration { + val kafkaCfg = create(cfg) + validate(kafkaCfg, cfg, log) + return kafkaCfg + } + + private fun validate(config: KafkaConfiguration, rawConfig: Map, log: Log? 
= null) { + validateConnection(config.bootstrapServers, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, false) + try { + LogStrategy.valueOf(config.logCompactionStrategy) + } catch (e: IllegalArgumentException) { + log?.warn("Invalid log compaction strategy setting, switching to default value ${TopicConfig.CLEANUP_POLICY_DELETE}") + config.logCompactionStrategy.apply { LogStrategy.delete.toString() } + } + } + + } + + fun asProperties(): Properties { + val props = Properties() + val map = JSONUtils.asMap(this) + .filter { + if (it.key == "transactionalId") { + it.value != StringUtils.EMPTY + } else { + true + } + } + .mapKeys { it.key.toPointCase() } + props.putAll(map) + props.putAll(extraProperties) + props.putAll(addSerializers()) // Fixed serializers + return props + } + + private fun addSerializers() : Properties { + val props = Properties() + props[ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG] = ByteArraySerializer::class.java + props[ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG] = ByteArraySerializer::class.java + return props + } + +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/producer/kafka/KafkaEventRouter.kt b/extended/src/main/kotlin/apoc/kafka/producer/kafka/KafkaEventRouter.kt new file mode 100644 index 0000000000..237db91a29 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/producer/kafka/KafkaEventRouter.kt @@ -0,0 +1,194 @@ +package apoc.kafka.producer.kafka + +import apoc.kafka.events.StreamsEvent +import apoc.kafka.events.KafkaStatus +import apoc.kafka.events.StreamsTransactionEvent +import apoc.kafka.extensions.isDefaultDb +//import apoc.kafka.producer.StreamsEventRouter +import apoc.kafka.producer.StreamsEventRouterConfiguration +import apoc.kafka.producer.asSourceRecordKey +import apoc.kafka.producer.asSourceRecordValue +import apoc.kafka.producer.toMap +import apoc.kafka.utils.JSONUtils +import apoc.kafka.utils.KafkaUtil +import kotlinx.coroutines.runBlocking +import kotlinx.coroutines.sync.Mutex +import kotlinx.coroutines.sync.withLock +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.common.KafkaException +import org.apache.kafka.common.errors.AuthorizationException +import org.apache.kafka.common.errors.OutOfOrderSequenceException +import org.apache.kafka.common.errors.ProducerFencedException +import org.neo4j.graphdb.GraphDatabaseService +import org.neo4j.logging.Log +import java.util.* + + +class KafkaEventRouter(private val config: Map, + private val db: GraphDatabaseService, + private val log: Log) { + + val eventRouterConfiguration: StreamsEventRouterConfiguration = StreamsEventRouterConfiguration + .from(config, db.databaseName(), db.isDefaultDb(), log) + + + private val mutex = Mutex() + + private var producer: Neo4jKafkaProducer? 
= null + private val kafkaConfig by lazy { KafkaConfiguration.from(config, log) } + private val kafkaAdminService by lazy { KafkaAdminService(kafkaConfig, log) } + + private fun status(producer: Neo4jKafkaProducer<*, *>?): KafkaStatus = when (producer != null) { + true -> KafkaStatus.RUNNING + else -> KafkaStatus.STOPPED + } + + fun start() = runBlocking { + mutex.withLock(producer) { + if (status(producer) == KafkaStatus.RUNNING) { + return@runBlocking + } + log.info("Initialising Kafka Connector") + val props = kafkaConfig.asProperties() + producer = Neo4jKafkaProducer(props) + producer!!.initTransactions() + log.info("Kafka Connector started") + } + } + + fun stop() = runBlocking { + mutex.withLock(producer) { + if (status(producer) == KafkaStatus.STOPPED) { + return@runBlocking + } + KafkaUtil.ignoreExceptions({ producer?.flush() }, UninitializedPropertyAccessException::class.java) + KafkaUtil.ignoreExceptions({ producer?.close() }, UninitializedPropertyAccessException::class.java) + KafkaUtil.ignoreExceptions({ kafkaAdminService.stop() }, UninitializedPropertyAccessException::class.java) + producer = null + } + } + + private fun send(producerRecord: ProducerRecord, sync: Boolean = false): Map? { + if (!kafkaAdminService.isValidTopic(producerRecord.topic())) { + if (log.isDebugEnabled) { + log.debug("Error while sending record to ${producerRecord.topic()}, because it doesn't exists") + } + // TODO add logging system here + return null + } + return if (sync) { + producer?.send(producerRecord)?.get()?.toMap() + } else { + producer?.send(producerRecord) { meta, error -> + if (meta != null && log.isDebugEnabled) { + log.debug("Successfully sent record in partition ${meta.partition()} offset ${meta.offset()} data ${meta.topic()} key size ${meta.serializedKeySize()}") + } + if (error != null) { + if (log.isDebugEnabled) { + log.debug("Error while sending record to ${producerRecord.topic()}, because of the following exception:", error) + } + // TODO add logging system here + } + } + null + } + } + + // this method is used by the procedures + private fun sendEvent(topic: String, event: StreamsEvent, config: Map, sync: Boolean = false): Map? 
{ + if (log.isDebugEnabled) { + log.debug("Trying to send a simple event with payload ${event.payload} to kafka") + } + // in the procedures we allow to define a custom message key via the configuration property key + // in order to have the backwards compatibility we define as default value the old key + val key = config.getOrDefault("key", UUID.randomUUID().toString()) + val partition = (config["partition"])?.toString()?.toInt() + + val producerRecord = ProducerRecord(topic, partition, System.currentTimeMillis(), key?.let { JSONUtils.writeValueAsBytes(it) }, + JSONUtils.writeValueAsBytes(event)) + return send(producerRecord, sync) + } + + // this method is used by the transaction event handler + private fun sendEvent(topic: String, event: StreamsTransactionEvent, config: Map) { + if (log.isDebugEnabled) { + log.debug("Trying to send a transaction event with txId ${event.meta.txId} and txEventId ${event.meta.txEventId} to kafka") + } + val key = JSONUtils.writeValueAsBytes(event.asSourceRecordKey(kafkaConfig.logCompactionStrategy)) + val value = event.asSourceRecordValue(kafkaConfig.logCompactionStrategy)?.let { JSONUtils.writeValueAsBytes(it) } + + val producerRecord = ProducerRecord(topic, null, System.currentTimeMillis(), key, value) + send(producerRecord) + } + + fun sendEventsSync(topic: String, transactionEvents: List, config: Map): List> { + producer?.beginTransaction() + + val results = transactionEvents.mapNotNull { + sendEvent(topic, it, config, true) + } + producer?.commitTransaction() + + return results + } + + fun sendEvents(topic: String, transactionEvents: List, config: Map) { + try { + producer?.beginTransaction() + transactionEvents.forEach { + if (it is StreamsTransactionEvent) { + sendEvent(topic, it, config) + } else { + sendEvent(topic, it, config) + } + } + producer?.commitTransaction() + } catch (e: ProducerFencedException) { + log.error("Another producer with the same transactional.id has been started. Stack trace is:", e) + producer?.close() + } catch (e: OutOfOrderSequenceException) { + log.error("The broker received an unexpected sequence number from the producer. Stack trace is:", e) + producer?.close() + } catch (e: AuthorizationException) { + log.error("Error in authorization. Stack trace is:", e) + producer?.close() + } catch (e: KafkaException) { + log.error("Generic kafka error. 
Stack trace is:", e) + producer?.abortTransaction() + } + } + +} + +class Neo4jKafkaProducer: KafkaProducer { + private val isTransactionEnabled: Boolean + constructor(props: Properties): super(props) { + isTransactionEnabled = props.containsKey("transactional.id") + } + + override fun initTransactions() { + if (isTransactionEnabled) { + super.initTransactions() + } + } + + override fun beginTransaction() { + if (isTransactionEnabled) { + super.beginTransaction() + } + } + + override fun commitTransaction() { + if (isTransactionEnabled) { + super.commitTransaction() + } + } + + override fun abortTransaction() { + if (isTransactionEnabled) { + super.abortTransaction() + } + } + +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/service/StreamsSinkService.kt b/extended/src/main/kotlin/apoc/kafka/service/StreamsSinkService.kt new file mode 100644 index 0000000000..80343b6f74 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/service/StreamsSinkService.kt @@ -0,0 +1,42 @@ +package apoc.kafka.service + +import apoc.kafka.service.sink.strategy.IngestionStrategy + + +const val STREAMS_TOPIC_KEY: String = "apoc.kafka.sink.topic" +const val STREAMS_TOPIC_CDC_KEY: String = "apoc.kafka.sink.topic.cdc" + +enum class TopicTypeGroup { CYPHER, CDC, PATTERN, CUD } +enum class TopicType(val group: TopicTypeGroup, val key: String) { + CDC_SOURCE_ID(group = TopicTypeGroup.CDC, key = "$STREAMS_TOPIC_CDC_KEY.sourceId"), + CYPHER(group = TopicTypeGroup.CYPHER, key = "$STREAMS_TOPIC_KEY.cypher"), + PATTERN_NODE(group = TopicTypeGroup.PATTERN, key = "$STREAMS_TOPIC_KEY.pattern.node"), + PATTERN_RELATIONSHIP(group = TopicTypeGroup.PATTERN, key = "$STREAMS_TOPIC_KEY.pattern.relationship"), + CDC_SCHEMA(group = TopicTypeGroup.CDC, key = "$STREAMS_TOPIC_CDC_KEY.schema"), + CUD(group = TopicTypeGroup.CUD, key = "$STREAMS_TOPIC_KEY.cud") +} + +data class StreamsSinkEntity(val key: Any?, val value: Any?) + +abstract class StreamsStrategyStorage { + abstract fun getTopicType(topic: String): TopicType? 
+ + abstract fun getStrategy(topic: String): IngestionStrategy +} + +abstract class StreamsSinkService(private val streamsStrategyStorage: StreamsStrategyStorage) { + + abstract fun write(query: String, events: Collection) + + private fun writeWithStrategy(data: Collection, strategy: IngestionStrategy) { + strategy.mergeNodeEvents(data).forEach { write(it.query, it.events) } + strategy.deleteNodeEvents(data).forEach { write(it.query, it.events) } + + strategy.mergeRelationshipEvents(data).forEach { write(it.query, it.events) } + strategy.deleteRelationshipEvents(data).forEach { write(it.query, it.events) } + } + + fun writeForTopic(topic: String, params: Collection) { + writeWithStrategy(params, streamsStrategyStorage.getStrategy(topic)) + } +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/service/Topics.kt b/extended/src/main/kotlin/apoc/kafka/service/Topics.kt new file mode 100644 index 0000000000..30009730db --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/service/Topics.kt @@ -0,0 +1,127 @@ +package apoc.kafka.service + +import apoc.kafka.service.sink.strategy.* +import kotlin.reflect.jvm.javaType + +class TopicValidationException(message: String): RuntimeException(message) + +private fun TopicType.replaceKeyBy(replacePrefix: Pair) = if (replacePrefix.first.isNullOrBlank()) + this.key + else + this.key.replace(replacePrefix.first, replacePrefix.second) + +data class Topics(val cypherTopics: Map = emptyMap(), + val cdcSourceIdTopics: Set = emptySet(), + val cdcSchemaTopics: Set = emptySet(), + val cudTopics: Set = emptySet(), + val nodePatternTopics: Map = emptyMap(), + val relPatternTopics: Map = emptyMap(), + val invalid: List = emptyList()) { + + operator fun plus(other: Topics): Topics { + return Topics(cypherTopics = this.cypherTopics + other.cypherTopics, + cdcSourceIdTopics = this.cdcSourceIdTopics + other.cdcSourceIdTopics, + cdcSchemaTopics = this.cdcSchemaTopics + other.cdcSchemaTopics, + cudTopics = this.cudTopics + other.cudTopics, + nodePatternTopics = this.nodePatternTopics + other.nodePatternTopics, + relPatternTopics = this.relPatternTopics + other.relPatternTopics, + invalid = this.invalid + other.invalid) + } + + fun allTopics(): List = this.asMap() + .map { + if (it.key.group == TopicTypeGroup.CDC || it.key.group == TopicTypeGroup.CUD) { + (it.value as Set).toList() + } else { + (it.value as Map).keys.toList() + } + } + .flatten() + + fun asMap(): Map = mapOf(TopicType.CYPHER to cypherTopics, TopicType.CUD to cudTopics, + TopicType.CDC_SCHEMA to cdcSchemaTopics, TopicType.CDC_SOURCE_ID to cdcSourceIdTopics, + TopicType.PATTERN_NODE to nodePatternTopics, TopicType.PATTERN_RELATIONSHIP to relPatternTopics) + + companion object { + fun from(map: Map, replacePrefix: Pair = ("" to ""), dbName: String = "", invalidTopics: List = emptyList()): Topics { + val config = map + .filterKeys { if (dbName.isNotBlank()) it.toLowerCase().endsWith(".to.$dbName") else !it.contains(".to.") } + .mapKeys { if (dbName.isNotBlank()) it.key.replace(".to.$dbName", "", true) else it.key } + val cypherTopicPrefix = TopicType.CYPHER.replaceKeyBy(replacePrefix) + val sourceIdKey = TopicType.CDC_SOURCE_ID.replaceKeyBy(replacePrefix) + val schemaKey = TopicType.CDC_SCHEMA.replaceKeyBy(replacePrefix) + val cudKey = TopicType.CUD.replaceKeyBy(replacePrefix) + val nodePatterKey = TopicType.PATTERN_NODE.replaceKeyBy(replacePrefix) + val relPatterKey = TopicType.PATTERN_RELATIONSHIP.replaceKeyBy(replacePrefix) + val cypherTopics = TopicUtils.filterByPrefix(config, 
cypherTopicPrefix) + val nodePatternTopics = TopicUtils + .filterByPrefix(config, nodePatterKey, invalidTopics) + .mapValues { NodePatternConfiguration.parse(it.value) } + val relPatternTopics = TopicUtils + .filterByPrefix(config, relPatterKey, invalidTopics) + .mapValues { RelationshipPatternConfiguration.parse(it.value) } + val cdcSourceIdTopics = TopicUtils.splitTopics(config[sourceIdKey] as? String, invalidTopics) + val cdcSchemaTopics = TopicUtils.splitTopics(config[schemaKey] as? String, invalidTopics) + val cudTopics = TopicUtils.splitTopics(config[cudKey] as? String, invalidTopics) + return Topics(cypherTopics, cdcSourceIdTopics, cdcSchemaTopics, cudTopics, nodePatternTopics, relPatternTopics) + } + } +} + +object TopicUtils { + + @JvmStatic val TOPIC_SEPARATOR = ";" + + fun filterByPrefix(config: Map<*, *>, prefix: String, invalidTopics: List = emptyList()): Map { + val fullPrefix = "$prefix." + return config + .filterKeys { it.toString().startsWith(fullPrefix) } + .mapKeys { it.key.toString().replace(fullPrefix, "") } + .filterKeys { !invalidTopics.contains(it) } + .mapValues { it.value.toString() } + } + + fun splitTopics(cdcMergeTopicsString: String?, invalidTopics: List = emptyList()): Set { + return if (cdcMergeTopicsString.isNullOrBlank()) { + emptySet() + } else { + cdcMergeTopicsString.split(TOPIC_SEPARATOR) + .filter { !invalidTopics.contains(it) } + .toSet() + } + } + + inline fun validate(topics: Topics) { + val exceptionStringConstructor = T::class.constructors + .first { it.parameters.size == 1 && it.parameters[0].type.javaType == String::class.java }!! + val crossDefinedTopics = topics.allTopics() + .groupBy({ it }, { 1 }) + .filterValues { it.sum() > 1 } + .keys + if (crossDefinedTopics.isNotEmpty()) { + throw exceptionStringConstructor + .call("The following topics are cross defined: $crossDefinedTopics") + } + } + + fun toStrategyMap(topics: Topics, sourceIdStrategyConfig: SourceIdIngestionStrategyConfig): Map { + return topics.asMap() + .filterKeys { it != TopicType.CYPHER } + .mapValues { (type, config) -> + when (type) { + TopicType.CDC_SOURCE_ID -> SourceIdIngestionStrategy(sourceIdStrategyConfig) + TopicType.CDC_SCHEMA -> SchemaIngestionStrategy() + TopicType.CUD -> CUDIngestionStrategy() + TopicType.PATTERN_NODE -> { + val map = config as Map + map.mapValues { NodePatternIngestionStrategy(it.value) } + } + TopicType.PATTERN_RELATIONSHIP -> { + val map = config as Map + map.mapValues { RelationshipPatternIngestionStrategy(it.value) } + } + else -> throw RuntimeException("Unsupported topic type $type") + } + } + } +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/service/errors/ErrorService.kt b/extended/src/main/kotlin/apoc/kafka/service/errors/ErrorService.kt new file mode 100644 index 0000000000..a333a430eb --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/service/errors/ErrorService.kt @@ -0,0 +1,105 @@ +package apoc.kafka.service.errors + +import org.apache.avro.generic.GenericRecord +import org.apache.kafka.clients.consumer.ConsumerRecord +import org.apache.kafka.common.record.RecordBatch +import apoc.kafka.extensions.toMap +import apoc.kafka.utils.JSONUtils +import java.util.* + + +data class ErrorData(val originalTopic: String, + val timestamp: Long, + val key: ByteArray?, + val value: ByteArray?, + val partition: String, + val offset: String, + val executingClass: Class<*>?, + val databaseName: String?, + val exception: Exception?) 
{ + + constructor(originalTopic: String, timestamp: Long?, key: Any?, value: Any?, + partition: Int, offset: Long, executingClass: Class<*>?, databaseName: String?, exception: Exception?) : + this(originalTopic, timestamp ?: RecordBatch.NO_TIMESTAMP, toByteArray(key), toByteArray(value), partition.toString(),offset.toString(), executingClass, databaseName, exception) + + companion object { + + fun from(consumerRecord: ConsumerRecord, exception: Exception?, executingClass: Class<*>?, databaseName: String?): ErrorData { + return ErrorData(offset = consumerRecord.offset().toString(), + originalTopic = consumerRecord.topic(), + partition = consumerRecord.partition().toString(), + timestamp = consumerRecord.timestamp(), + exception = exception, + executingClass = executingClass, + key = toByteArray(consumerRecord.key()), + value = toByteArray(consumerRecord.value()), + databaseName = databaseName) + } + + fun toByteArray(v:Any?) = try { + when (v) { + null -> null + is ByteArray -> v + is GenericRecord -> JSONUtils.writeValueAsBytes(mapOf("schema" to v.schema.toMap(), "record" to v.toMap())) + else -> v.toString().toByteArray(Charsets.UTF_8) + } + } catch (e:Exception) { + null + } + } + fun toLogString() = + """ +ErrorData(originalTopic=$originalTopic, timestamp=$timestamp, partition=$partition, offset=$offset, exception=$exception, key=${key?.toString(Charsets.UTF_8)}, value=${value?.sliceArray(0..Math.min(value.size,200)-1)?.toString(Charsets.UTF_8)}, executingClass=$executingClass) + """.trimIndent() + +} + +abstract class ErrorService(private val config: Map = emptyMap()) { + + data class ErrorConfig(val fail:Boolean=false, val log:Boolean=false, val logMessages:Boolean=false, + val dlqTopic:String? = null, val dlqHeaderPrefix:String = "", val dlqHeaders:Boolean = false, val dlqReplication: Int? = 3) { + + companion object { + const val TOLERANCE = "errors.tolerance" + const val LOG = "errors.log.enable" + const val LOG_MESSAGES = "errors.log.include.messages" + const val DLQ_TOPIC = "errors.deadletterqueue.topic.name" + const val DLQ_HEADERS = "errors.deadletterqueue.context.headers.enable" + const val DLQ_HEADER_PREFIX = "errors.deadletterqueue.context.headers.prefix" + const val DLQ_REPLICATION = "errors.deadletterqueue.topic.replication.factor" + + fun from(props: Properties) = from(props.toMap() as Map) + + fun boolean(v:Any?) = when (v) { + null -> false + "true" -> true + "false" -> false + is Boolean -> v + else -> false + } + fun int(v:Any?) 
= when (v) { + null -> 0 + is Int -> v + is String -> v.toInt() + else -> 0 + } + + fun from(config: Map) = + ErrorConfig( + fail = config.getOrDefault(TOLERANCE, "none") == "none", + log = boolean(config.get(LOG)), + logMessages = boolean(config.get(LOG_MESSAGES)), + dlqTopic = config.get(DLQ_TOPIC) as String?, + dlqHeaders = boolean(config.get(DLQ_HEADERS)), + dlqHeaderPrefix = config.getOrDefault(DLQ_HEADER_PREFIX,"") as String, + dlqReplication = int(config.getOrDefault(DLQ_REPLICATION, 3))) + } + } + + abstract fun report(errorDatas: List) + + open fun close() {} +} + +class ProcessingError(val errorDatas: List) : + RuntimeException("Error processing ${errorDatas.size} messages\n"+errorDatas.map { it.toLogString() }.joinToString("\n")) diff --git a/extended/src/main/kotlin/apoc/kafka/service/errors/KafkaErrorService.kt b/extended/src/main/kotlin/apoc/kafka/service/errors/KafkaErrorService.kt new file mode 100644 index 0000000000..864a4deb3e --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/service/errors/KafkaErrorService.kt @@ -0,0 +1,97 @@ +package apoc.kafka.service.errors + +import org.apache.commons.lang3.exception.ExceptionUtils +import org.apache.kafka.clients.CommonClientConfigs +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.clients.producer.Producer +import org.apache.kafka.clients.producer.ProducerConfig +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.common.record.RecordBatch +import org.apache.kafka.common.serialization.ByteArraySerializer +import org.neo4j.util.VisibleForTesting +import apoc.kafka.utils.KafkaUtil.validateConnection +import java.util.* + +class KafkaErrorService(private val producer: Producer?, private val errorConfig: ErrorConfig, private val log: (String, Exception?)->Unit): ErrorService() { + + constructor(config: Properties, errorConfig: ErrorConfig, + log: (String, Exception?) -> Unit) : this(producer(errorConfig, config, log), errorConfig, log) + + companion object { + private fun producer(errorConfig: ErrorConfig, config: Properties, log: (String, Exception?) 
-> Unit) = + errorConfig.dlqTopic?.let { + try { + val bootstrapServers = config.getOrDefault(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "").toString() + validateConnection(bootstrapServers, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, false) + config[ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG] = ByteArraySerializer::class.java.name + config[ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG] = ByteArraySerializer::class.java.name + KafkaProducer(config) + } catch (e: Exception) { + log("Cannot initialize the custom DLQ because of the following exception: ", e) + null + } + } + } + + override fun report(errorDatas: List) { + if (errorConfig.fail) throw ProcessingError(errorDatas) + if (errorConfig.log) { + if (errorConfig.logMessages) { + errorDatas.forEach{log(it.toLogString(),it.exception)} + } else { + errorDatas.map { it.exception }.distinct().forEach{log("Error processing ${errorDatas.size} messages",it)} + } + } + + errorDatas.forEach { dlqData -> + producer?.let { + try { + val producerRecord = if (dlqData.timestamp == RecordBatch.NO_TIMESTAMP) { + ProducerRecord(errorConfig.dlqTopic, null, dlqData.key, dlqData.value) + } else { + ProducerRecord(errorConfig.dlqTopic, null, dlqData.timestamp, dlqData.key, dlqData.value) + } + if (errorConfig.dlqHeaders) { + val producerHeader = producerRecord.headers() + populateContextHeaders(dlqData).forEach { (key, value) -> producerHeader.add(key, value) } + } + it.send(producerRecord) + } catch (e: Exception) { + log("Error writing to DLQ $e: ${dlqData.toLogString()}", e) // todo only the first or all + } + } + } + } + + @VisibleForTesting + fun populateContextHeaders(errorData: ErrorData): Map { + fun prefix(suffix: String) = errorConfig.dlqHeaderPrefix + suffix + + val headers = mutableMapOf( + prefix("topic") to errorData.originalTopic.toByteArray(), + prefix("partition") to errorData.partition.toByteArray(), + prefix("offset") to errorData.offset.toByteArray()) + + if (!errorData.databaseName.isNullOrBlank()) { + headers[prefix("databaseName")] = errorData.databaseName.toByteArray() + } + + if (errorData.executingClass != null) { + headers[prefix("class.name")] = errorData.executingClass.name.toByteArray() + } + if (errorData.exception != null) { + headers[prefix("exception.class.name")] = errorData.exception.javaClass.name.toByteArray() + if (errorData.exception.message != null) { + headers[prefix("exception.message")] = errorData.exception.message.toString().toByteArray() + } + headers[prefix("exception.stacktrace")] = ExceptionUtils.getStackTrace(errorData.exception).toByteArray() + } + return headers + } + + + override fun close() { + this.producer?.close() + } + +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/CUDIngestionStrategy.kt b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/CUDIngestionStrategy.kt new file mode 100644 index 0000000000..9e3d294620 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/CUDIngestionStrategy.kt @@ -0,0 +1,282 @@ +package apoc.kafka.service.sink.strategy + +import apoc.kafka.events.EntityType +import apoc.kafka.extensions.quote +import apoc.kafka.service.StreamsSinkEntity +import apoc.kafka.service.sink.strategy.CUDIngestionStrategy.Companion.FROM_KEY +import apoc.kafka.service.sink.strategy.CUDIngestionStrategy.Companion.TO_KEY +import apoc.kafka.utils.JSONUtils +import apoc.kafka.utils.KafkaUtil.getLabelsAsString +import apoc.kafka.utils.KafkaUtil.getNodeKeysAsString +import apoc.kafka.utils.KafkaUtil + + +enum 
class CUDOperations { create, merge, update, delete, match } + +abstract class CUD { + abstract val op: CUDOperations + abstract val type: EntityType + abstract val properties: Map +} + +data class CUDNode(override val op: CUDOperations, + override val properties: Map = emptyMap(), + val ids: Map = emptyMap(), + val detach: Boolean = true, + val labels: List = emptyList()): CUD() { + override val type = EntityType.node + + fun toMap(): Map { + return when (op) { + CUDOperations.delete -> mapOf("ids" to ids) + else -> mapOf("ids" to ids, "properties" to properties) + } + } +} + +data class CUDNodeRel(val ids: Map = emptyMap(), + val labels: List, + val op: CUDOperations = CUDOperations.match) + +data class CUDRelationship(override val op: CUDOperations, + override val properties: Map = emptyMap(), + val rel_type: String, + val from: CUDNodeRel, + val to: CUDNodeRel): CUD() { + override val type = EntityType.relationship + + fun toMap(): Map { + val from = mapOf("ids" to from.ids) + val to = mapOf("ids" to to.ids) + return when (op) { + CUDOperations.delete -> mapOf(FROM_KEY to from, + TO_KEY to to) + else -> mapOf(FROM_KEY to from, + TO_KEY to to, + "properties" to properties) + } + } +} + + +class CUDIngestionStrategy: IngestionStrategy { + + companion object { + @JvmStatic val ID_KEY = "ids" + @JvmStatic val PHYSICAL_ID_KEY = "_id" + @JvmStatic val FROM_KEY = "from" + @JvmStatic val TO_KEY = "to" + + private val LIST_VALID_CUD_NODE_REL = listOf(CUDOperations.merge, CUDOperations.create, CUDOperations.match) + private val LIST_VALID_CUD_REL = listOf(CUDOperations.create, CUDOperations.merge, CUDOperations.update) + } + + data class NodeRelMetadata(val labels: List, val ids: Set, val op: CUDOperations = CUDOperations.match) + + private fun CUDRelationship.isValidOperation(): Boolean = from.op in LIST_VALID_CUD_NODE_REL && to.op in LIST_VALID_CUD_NODE_REL && op in LIST_VALID_CUD_REL + + private fun NodeRelMetadata.getOperation() = op.toString().toUpperCase() + + private fun buildNodeLookupByIds(keyword: String = "MATCH", ids: Set, labels: List, identifier: String = "n", field: String = ""): String { + val fullField = if (field.isNotBlank()) "$field." 
else field + val quotedIdentifier = identifier.quote() + return when (ids.contains(PHYSICAL_ID_KEY)) { + true -> "MATCH ($quotedIdentifier) WHERE id($quotedIdentifier) = event.$fullField$ID_KEY._id" + else -> "$keyword ($quotedIdentifier${getLabelsAsString(labels)} {${getNodeKeysAsString(keys = ids, prefix = "$fullField$ID_KEY")}})" + } + } + + private fun buildNodeCreateStatement(labels: List): String = """ + |${KafkaUtil.UNWIND} + |CREATE (n${getLabelsAsString(labels)}) + |SET n = event.properties + """.trimMargin() + + private fun buildRelCreateStatement(from: NodeRelMetadata, to: NodeRelMetadata, + rel_type: String): String = """ + |${KafkaUtil.UNWIND} + |${buildNodeLookupByIds(keyword = from.getOperation(), ids = from.ids, labels = from.labels, identifier = FROM_KEY, field = FROM_KEY)} + |${KafkaUtil.WITH_EVENT_FROM} + |${buildNodeLookupByIds(keyword = to.getOperation(), ids = to.ids, labels = to.labels, identifier = TO_KEY, field = TO_KEY)} + |CREATE ($FROM_KEY)-[r:${rel_type.quote()}]->($TO_KEY) + |SET r = event.properties + """.trimMargin() + + private fun buildNodeMergeStatement(labels: List, ids: Set): String = """ + |${KafkaUtil.UNWIND} + |${buildNodeLookupByIds(keyword = "MERGE", ids = ids, labels = labels)} + |SET n += event.properties + """.trimMargin() + + private fun buildRelMergeStatement(from: NodeRelMetadata, to: NodeRelMetadata, + rel_type: String): String = """ + |${KafkaUtil.UNWIND} + |${buildNodeLookupByIds(keyword = from.getOperation(), ids = from.ids, labels = from.labels, identifier = FROM_KEY, field = FROM_KEY)} + |${KafkaUtil.WITH_EVENT_FROM} + |${buildNodeLookupByIds(keyword = to.getOperation(), ids = to.ids, labels = to.labels, identifier = TO_KEY, field = TO_KEY)} + |MERGE ($FROM_KEY)-[r:${rel_type.quote()}]->($TO_KEY) + |SET r += event.properties + """.trimMargin() + + private fun buildNodeUpdateStatement(labels: List, ids: Set): String = """ + |${KafkaUtil.UNWIND} + |${buildNodeLookupByIds(ids = ids, labels = labels)} + |SET n += event.properties + """.trimMargin() + + private fun buildRelUpdateStatement(from: NodeRelMetadata, to: NodeRelMetadata, + rel_type: String): String = """ + |${KafkaUtil.UNWIND} + |${buildNodeLookupByIds(ids = from.ids, labels = from.labels, identifier = FROM_KEY, field = FROM_KEY)} + |${buildNodeLookupByIds(ids = to.ids, labels = to.labels, identifier = TO_KEY, field = TO_KEY)} + |MATCH ($FROM_KEY)-[r:${rel_type.quote()}]->($TO_KEY) + |SET r += event.properties + """.trimMargin() + + private fun buildDeleteStatement(labels: List, ids: Set, detach: Boolean): String = """ + |${KafkaUtil.UNWIND} + |${buildNodeLookupByIds(ids = ids, labels = labels)} + |${if (detach) "DETACH " else ""}DELETE n + """.trimMargin() + + private fun buildRelDeleteStatement(from: NodeRelMetadata, to: NodeRelMetadata, + rel_type: String): String = """ + |${KafkaUtil.UNWIND} + |${buildNodeLookupByIds(ids = from.ids, labels = from.labels, identifier = FROM_KEY, field = FROM_KEY)} + |${buildNodeLookupByIds(ids = to.ids, labels = to.labels, identifier = TO_KEY, field = TO_KEY)} + |MATCH ($FROM_KEY)-[r:${rel_type.quote()}]->($TO_KEY) + |DELETE r + """.trimMargin() + + private inline fun toCUDEntity(it: Any): T? 
{ + return when (it) { + is T -> it + is Map<*, *> -> { + val type = it["type"]?.toString() + val entityType = if (type == null) null else EntityType.valueOf(type) + when { + entityType == null -> throw RuntimeException("No `type` field found") + entityType != null && EntityType.node == entityType && T::class.java != CUDNode::class.java -> null + entityType != null && EntityType.relationship == entityType && T::class.java != CUDRelationship::class.java -> null + else -> JSONUtils.convertValue(it) + } + } + else -> null + } + } + + private fun getLabels(relNode: CUDNodeRel) = if (relNode.ids.containsKey(PHYSICAL_ID_KEY)) emptyList() else relNode.labels + private fun getLabels(node: CUDNode) = if (node.ids.containsKey(PHYSICAL_ID_KEY)) emptyList() else node.labels + + override fun mergeNodeEvents(events: Collection): List { + val data = events + .mapNotNull { + it.value?.let { + try { + val data = toCUDEntity(it) + when (data?.op) { + CUDOperations.merge -> if (data.ids.isNotEmpty() && data.properties.isNotEmpty()) data else null // TODO send to the DLQ the null + CUDOperations.update, CUDOperations.create -> if (data.properties.isNotEmpty()) data else null // TODO send to the DLQ the null + else -> null + } + } catch (e: Exception) { + null + } + } + } + .groupBy({ it.op }, { it }) + + val create = data[CUDOperations.create] + .orEmpty() + .groupBy { getLabels(it) } + .map { QueryEvents(buildNodeCreateStatement(it.key), it.value.map { it.toMap() }) } + val merge = data[CUDOperations.merge] + .orEmpty() + .groupBy { getLabels(it) to it.ids.keys } + .map { QueryEvents(buildNodeMergeStatement(it.key.first, it.key.second), it.value.map { it.toMap() }) } + val update = data[CUDOperations.update] + .orEmpty() + .groupBy { getLabels(it) to it.ids.keys } + .map { QueryEvents(buildNodeUpdateStatement(it.key.first, it.key.second), it.value.map { it.toMap() }) } + return (create + merge + update) // we'll group the data because of in case of `_id` key is present the generated queries are the same for update/merge + .map { it.query to it.events } + .groupBy({ it.first }, { it.second }) + .map { QueryEvents(it.key, it.value.flatten()) } + } + + override fun deleteNodeEvents(events: Collection): List { + return events + .mapNotNull { + it.value?.let { + try { + val data = toCUDEntity(it) + when (data?.op) { + CUDOperations.delete -> if (data.ids.isNotEmpty() && data.properties.isEmpty()) data else null // TODO send to the DLQ the null + else -> null // TODO send to the DLQ the null + } + } catch (e: Exception) { + null + } + } + } + .groupBy { Triple(it.labels, it.ids.keys, it.detach) } + .map { + val (labels, keys, detach) = it.key + QueryEvents(buildDeleteStatement(labels, keys, detach), it.value.map { it.toMap() }) + } + } + + override fun mergeRelationshipEvents(events: Collection): List { + val data = events + .mapNotNull { + it.value?.let { + try { + val data = toCUDEntity(it) + when { + data!!.isValidOperation() -> if (data.from.ids.isNotEmpty() && data.to.ids.isNotEmpty()) data else null // TODO send to the DLQ the null + else -> null // TODO send to the DLQ the null + } + } catch (e: Exception) { + null + } + } + } + .groupBy({ it.op }, { it }) + + return data.flatMap { (op, list) -> + list.groupBy { Triple(NodeRelMetadata(getLabels(it.from), it.from.ids.keys, it.from.op), NodeRelMetadata(getLabels(it.to), it.to.ids.keys, it.to.op), it.rel_type) } + .map { + val (from, to, rel_type) = it.key + val query = when (op) { + CUDOperations.create -> buildRelCreateStatement(from, to, rel_type) + 
CUDOperations.merge -> buildRelMergeStatement(from, to, rel_type) + else -> buildRelUpdateStatement(from, to, rel_type) + } + QueryEvents(query, it.value.map { it.toMap() }) + } + } + } + + override fun deleteRelationshipEvents(events: Collection): List { + return events + .mapNotNull { + it.value?.let { + try { + val data = toCUDEntity(it) + when (data?.op) { + CUDOperations.delete -> if (data.from.ids.isNotEmpty() && data.to.ids.isNotEmpty()) data else null // TODO send to the DLQ the null + else -> null // TODO send to the DLQ the null + } + } catch (e: Exception) { + null + } + } + } + .groupBy { Triple(NodeRelMetadata(getLabels(it.from), it.from.ids.keys), NodeRelMetadata(getLabels(it.to), it.to.ids.keys), it.rel_type) } + .map { + val (from, to, rel_type) = it.key + QueryEvents(buildRelDeleteStatement(from, to, rel_type), it.value.map { it.toMap() }) + } + } + +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/IngestionStrategy.kt b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/IngestionStrategy.kt new file mode 100644 index 0000000000..714406baf6 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/IngestionStrategy.kt @@ -0,0 +1,37 @@ +package apoc.kafka.service.sink.strategy + +import apoc.kafka.events.Constraint +import apoc.kafka.events.RelationshipPayload +import apoc.kafka.service.StreamsSinkEntity + + +data class QueryEvents(val query: String, val events: List>) + +interface IngestionStrategy { + fun mergeNodeEvents(events: Collection): List + fun deleteNodeEvents(events: Collection): List + fun mergeRelationshipEvents(events: Collection): List + fun deleteRelationshipEvents(events: Collection): List +} + +data class RelationshipSchemaMetadata(val label: String, + val startLabels: List, + val endLabels: List, + val startKeys: Set, + val endKeys: Set) { + constructor(payload: RelationshipPayload) : this(label = payload.label, + startLabels = payload.start.labels.orEmpty(), + endLabels = payload.end.labels.orEmpty(), + startKeys = payload.start.ids.keys, + endKeys = payload.end.ids.keys) +} + +data class NodeSchemaMetadata(val constraints: List, + val labelsToAdd: List, + val labelsToDelete: List, + val keys: Set) + + + +data class NodeMergeMetadata(val labelsToAdd: Set, + val labelsToDelete: Set) \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/NodePatternIngestionStrategy.kt b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/NodePatternIngestionStrategy.kt new file mode 100644 index 0000000000..b22bdf8080 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/NodePatternIngestionStrategy.kt @@ -0,0 +1,91 @@ +package apoc.kafka.service.sink.strategy + +import apoc.kafka.extensions.flatten +import apoc.kafka.service.StreamsSinkEntity +import apoc.kafka.utils.JSONUtils +import apoc.kafka.utils.KafkaUtil.containsProp +import apoc.kafka.utils.KafkaUtil.getLabelsAsString +import apoc.kafka.utils.KafkaUtil.getNodeMergeKeys +import apoc.kafka.utils.KafkaUtil + +class NodePatternIngestionStrategy(private val nodePatternConfiguration: NodePatternConfiguration): IngestionStrategy { + + private val mergeNodeTemplate: String = """ + |${KafkaUtil.UNWIND} + |MERGE (n${getLabelsAsString(nodePatternConfiguration.labels)}{${ + getNodeMergeKeys("keys", nodePatternConfiguration.keys) + }}) + |SET n = event.properties + |SET n += event.keys + """.trimMargin() + + private val deleteNodeTemplate: String = """ + |${KafkaUtil.UNWIND} 
+ |MATCH (n${getLabelsAsString(nodePatternConfiguration.labels)}{${ + getNodeMergeKeys("keys", nodePatternConfiguration.keys) + }}) + |DETACH DELETE n + """.trimMargin() + + override fun mergeNodeEvents(events: Collection): List { + val data = events + .mapNotNull { if (it.value != null) JSONUtils.asMap(it.value) else null } + .mapNotNull { toData(nodePatternConfiguration, it) } + return if (data.isEmpty()) { + emptyList() + } else { + listOf(QueryEvents(mergeNodeTemplate, data)) + } + } + + override fun deleteNodeEvents(events: Collection): List { + val data = events + .filter { it.value == null && it.key != null } + .mapNotNull { if (it.key != null) JSONUtils.asMap(it.key) else null } + .mapNotNull { toData(nodePatternConfiguration, it, false) } + return if (data.isEmpty()) { + emptyList() + } else { + listOf(QueryEvents(deleteNodeTemplate, data)) + } + } + + override fun mergeRelationshipEvents(events: Collection): List { + return emptyList() + } + + override fun deleteRelationshipEvents(events: Collection): List { + return emptyList() + } + + companion object { + fun toData(nodePatternConfiguration: NodePatternConfiguration, props: Map, withProperties: Boolean = true): Map>? { + val properties = props.flatten() + val containsKeys = nodePatternConfiguration.keys.all { properties.containsKey(it) } + return if (containsKeys) { + val filteredProperties = when (nodePatternConfiguration.type) { + PatternConfigurationType.ALL -> properties.filterKeys { !nodePatternConfiguration.keys.contains(it) } + PatternConfigurationType.EXCLUDE -> properties.filterKeys { key -> + val containsProp = containsProp(key, nodePatternConfiguration.properties) + !nodePatternConfiguration.keys.contains(key) && !containsProp + } + PatternConfigurationType.INCLUDE -> properties.filterKeys { key -> + val containsProp = containsProp(key, nodePatternConfiguration.properties) + !nodePatternConfiguration.keys.contains(key) && containsProp + } + } + if (withProperties) { + mapOf("keys" to properties.filterKeys { nodePatternConfiguration.keys.contains(it) }, + "properties" to filteredProperties) + } else { + mapOf("keys" to properties.filterKeys { nodePatternConfiguration.keys.contains(it) }) + } + } else { + null + } + } + + + } + +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/PatternConfiguration.kt b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/PatternConfiguration.kt new file mode 100644 index 0000000000..8e6b36093d --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/PatternConfiguration.kt @@ -0,0 +1,198 @@ +package apoc.kafka.service.sink.strategy + +import apoc.kafka.extensions.quote + +enum class PatternConfigurationType { ALL, INCLUDE, EXCLUDE } + +private const val ID_PREFIX = "!" 
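+// Illustrative only (not part of this patch): how a node pattern string decomposes under the
+// ID_PREFIX / MINUS_PREFIX markers defined here, following NodePatternConfiguration.parse below;
+// the pattern values are example inputs, not configuration shipped by this change.
+//
+//   "(:LabelA{!id,foo,bar})"  ->  keys = {"id"} (marked by ID_PREFIX "!"),
+//                                 type = PatternConfigurationType.INCLUDE,
+//                                 labels = ["LabelA"], properties = ["foo", "bar"]
+//
+//   Using MINUS_PREFIX on the non-key properties instead, e.g. "(:LabelA{!id,-foo})",
+//   would yield type = EXCLUDE with properties = ["foo"].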
+private const val MINUS_PREFIX = "-" +private const val LABEL_SEPARATOR = ":" +private const val PROPERTIES_SEPARATOR = "," + +private fun getPatternConfiguredType(properties: List): PatternConfigurationType { + if (properties.isEmpty()) { + return PatternConfigurationType.ALL + } + return when (properties[0].trim()[0]) { + '*' -> PatternConfigurationType.ALL + '-' -> PatternConfigurationType.EXCLUDE + else -> PatternConfigurationType.INCLUDE + } +} + +private fun isHomogeneousPattern(type: PatternConfigurationType, properties: List, pattern: String, entityType: String) { + val isHomogeneous = when (type) { + PatternConfigurationType.INCLUDE -> properties.all { it.trim()[0].isJavaIdentifierStart() } + PatternConfigurationType.EXCLUDE -> properties.all { it.trim().startsWith(MINUS_PREFIX) } + PatternConfigurationType.ALL -> properties.isEmpty() || properties == listOf("*") + } + if (!isHomogeneous) { + throw IllegalArgumentException("The $entityType pattern $pattern is not homogeneous") + } +} + +private fun cleanProperties(type: PatternConfigurationType, properties: List): List { + return when (type) { + PatternConfigurationType.INCLUDE -> properties.map { it.trim() } + PatternConfigurationType.EXCLUDE -> properties.map { it.trim().replace(MINUS_PREFIX, "") } + PatternConfigurationType.ALL -> emptyList() + } +} + +interface PatternConfiguration + +data class NodePatternConfiguration(val keys: Set, val type: PatternConfigurationType, + val labels: List, val properties: List): PatternConfiguration { + companion object { + + // (:LabelA{!id,foo,bar}) + @JvmStatic private val cypherNodePatternConfigured = """\((:\w+\s*(?::\s*(?:\w+)\s*)*)\s*(?:\{\s*(-?[\w!\.]+\s*(?:,\s*-?[!\w\*\.]+\s*)*)\})?\)$""".toRegex() + // LabelA{!id,foo,bar} + @JvmStatic private val simpleNodePatternConfigured = """^(\w+\s*(?::\s*(?:\w+)\s*)*)\s*(?:\{\s*(-?[\w!\.]+\s*(?:,\s*-?[!\w\*\.]+\s*)*)\})?$""".toRegex() + fun parse(pattern: String): NodePatternConfiguration { + val isCypherPattern = pattern.startsWith("(") + val regex = if (isCypherPattern) cypherNodePatternConfigured else simpleNodePatternConfigured + val matcher = regex.matchEntire(pattern) + if (matcher == null) { + throw IllegalArgumentException("The Node pattern $pattern is invalid") + } else { + val labels = matcher.groupValues[1] + .split(LABEL_SEPARATOR) + .let { + if (isCypherPattern) it.drop(1) else it + } + .map { it.quote() } + val allProperties = matcher.groupValues[2].split(PROPERTIES_SEPARATOR) + val keys = allProperties + .filter { it.startsWith(ID_PREFIX) } + .map { it.trim().substring(1) }.toSet() + if (keys.isEmpty()) { + throw IllegalArgumentException("The Node pattern $pattern must contains at lest one key") + } + val properties = allProperties.filter { !it.startsWith(ID_PREFIX) } + val type = getPatternConfiguredType(properties) + isHomogeneousPattern(type, properties, pattern, "Node") + val cleanedProperties = cleanProperties(type, properties) + + return NodePatternConfiguration(keys = keys, type = type, + labels = labels, properties = cleanedProperties) + } + } + } +} + + +data class RelationshipPatternConfiguration(val start: NodePatternConfiguration, val end: NodePatternConfiguration, + val relType: String, val type: PatternConfigurationType, + val properties: List): PatternConfiguration { + companion object { + + // we don't allow ALL for start/end nodes in rels + // it's public for testing purpose + fun getNodeConf(pattern: String): NodePatternConfiguration { + val start = NodePatternConfiguration.parse(pattern) + return if 
(start.type == PatternConfigurationType.ALL) { + NodePatternConfiguration(keys = start.keys, type = PatternConfigurationType.INCLUDE, + labels = start.labels, properties = start.properties) + } else { + start + } + } + + // (:Source{!id})-[:REL_TYPE{foo, -bar}]->(:Target{!targetId}) + private val cypherRelationshipPatternConfigured = """^\(:(.*?)\)(<)?-\[(?::)([\w\_]+)(\{\s*(-?[\w\*\.]+\s*(?:,\s*-?[\w\*\.]+\s*)*)\})?\]-(>)?\(:(.*?)\)$""".toRegex() + // LabelA{!id} REL_TYPE{foo, -bar} LabelB{!targetId} + private val simpleRelationshipPatternConfigured = """^(.*?) ([\w\_]+)(\{\s*(-?[\w\*\.]+\s*(?:,\s*-?[\w\*\.]+\s*)*)\})? (.*?)$""".toRegex() + + data class RelationshipPatternMetaData(val startPattern: String, val endPattern: String, val relType: String, val properties: List) { + companion object { + + private fun toProperties(propGroup: String): List = if (propGroup.isNullOrBlank()) { + emptyList() + } else { + propGroup.split(PROPERTIES_SEPARATOR) + } + + fun create(isCypherPattern: Boolean, isLeftToRight: Boolean, groupValues: List): RelationshipPatternMetaData { + lateinit var start: String + lateinit var end: String + lateinit var relType: String + lateinit var props: List + + if (isCypherPattern) { + if (isLeftToRight) { + start = groupValues[1] + end = groupValues[7] + } else { + start = groupValues[7] + end = groupValues[1] + } + relType = groupValues[3] + props = toProperties(groupValues[5]) + } else { + if (isLeftToRight) { + start = groupValues[1] + end = groupValues[5] + } else { + start = groupValues[5] + end = groupValues[1] + } + relType = groupValues[2] + props = toProperties(groupValues[4]) + } + + return RelationshipPatternMetaData(startPattern = start, + endPattern = end, relType = relType, + properties = props) + } + } + } + + fun parse(pattern: String): RelationshipPatternConfiguration { + val isCypherPattern = pattern.startsWith("(") + val regex = if (isCypherPattern) { + cypherRelationshipPatternConfigured + } else { + simpleRelationshipPatternConfigured + } + val matcher = regex.matchEntire(pattern) + if (matcher == null) { + throw IllegalArgumentException("The Relationship pattern $pattern is invalid") + } else { + val isLeftToRight = (!isCypherPattern || isUndirected(matcher) || isDirectedToRight(matcher)) + val isRightToLeft = if (isCypherPattern) isDirectedToLeft(matcher) else false + + if (!isLeftToRight && !isRightToLeft) { + throw IllegalArgumentException("The Relationship pattern $pattern has an invalid direction") + } + + val metadata = RelationshipPatternMetaData.create(isCypherPattern, isLeftToRight, matcher.groupValues) + + val start = try { + getNodeConf(metadata.startPattern) + } catch (e: Exception) { + throw IllegalArgumentException("The Relationship pattern $pattern is invalid") + } + val end = try { + getNodeConf(metadata.endPattern) + } catch (e: Exception) { + throw IllegalArgumentException("The Relationship pattern $pattern is invalid") + } + val type = getPatternConfiguredType(metadata.properties) + isHomogeneousPattern(type, metadata.properties, pattern, "Relationship") + val cleanedProperties = cleanProperties(type, metadata.properties) + return RelationshipPatternConfiguration(start = start, end = end, relType = metadata.relType, + properties = cleanedProperties, type = type) + } + } + + private fun isDirectedToLeft(matcher: MatchResult) = + (matcher.groupValues[2] == "<" && matcher.groupValues[6] == "") + + private fun isDirectedToRight(matcher: MatchResult) = + (matcher.groupValues[2] == "" && matcher.groupValues[6] == ">") + + private fun 
isUndirected(matcher: MatchResult) = + (matcher.groupValues[2] == "" && matcher.groupValues[6] == "") + } +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/RelationshipPatternIngestionStrategy.kt b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/RelationshipPatternIngestionStrategy.kt new file mode 100644 index 0000000000..f8188eb78e --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/RelationshipPatternIngestionStrategy.kt @@ -0,0 +1,120 @@ +package apoc.kafka.service.sink.strategy + +import apoc.kafka.extensions.flatten +import apoc.kafka.service.StreamsSinkEntity +import apoc.kafka.utils.JSONUtils +import apoc.kafka.utils.KafkaUtil.containsProp +import apoc.kafka.utils.KafkaUtil.getLabelsAsString +import apoc.kafka.utils.KafkaUtil.getNodeMergeKeys +import apoc.kafka.utils.KafkaUtil + +class RelationshipPatternIngestionStrategy(private val relationshipPatternConfiguration: RelationshipPatternConfiguration): IngestionStrategy { + + private val mergeRelationshipTemplate: String = """ + |${KafkaUtil.UNWIND} + |MERGE (start${getLabelsAsString(relationshipPatternConfiguration.start.labels)}{${ + getNodeMergeKeys("start.keys", relationshipPatternConfiguration.start.keys) + }}) + |SET start = event.start.properties + |SET start += event.start.keys + |MERGE (end${getLabelsAsString(relationshipPatternConfiguration.end.labels)}{${ + getNodeMergeKeys("end.keys", relationshipPatternConfiguration.end.keys) + }}) + |SET end = event.end.properties + |SET end += event.end.keys + |MERGE (start)-[r:${relationshipPatternConfiguration.relType}]->(end) + |SET r = event.properties + """.trimMargin() + + private val deleteRelationshipTemplate: String = """ + |${KafkaUtil.UNWIND} + |MATCH (start${getLabelsAsString(relationshipPatternConfiguration.start.labels)}{${ + getNodeMergeKeys("start.keys", relationshipPatternConfiguration.start.keys) + }}) + |MATCH (end${getLabelsAsString(relationshipPatternConfiguration.end.labels)}{${ + getNodeMergeKeys("end.keys", relationshipPatternConfiguration.end.keys) + }}) + |MATCH (start)-[r:${relationshipPatternConfiguration.relType}]->(end) + |DELETE r + """.trimMargin() + + override fun mergeNodeEvents(events: Collection): List { + return emptyList() + } + + override fun deleteNodeEvents(events: Collection): List { + return emptyList() + } + + override fun mergeRelationshipEvents(events: Collection): List { + val data = events + .mapNotNull { if (it.value != null) JSONUtils.asMap(it.value) else null } + .mapNotNull { props -> + val properties = props.flatten() + val containsKeys = relationshipPatternConfiguration.start.keys.all { properties.containsKey(it) } + && relationshipPatternConfiguration.end.keys.all { properties.containsKey(it) } + if (containsKeys) { + val filteredProperties = when (relationshipPatternConfiguration.type) { + PatternConfigurationType.ALL -> properties.filterKeys { isRelationshipProperty(it) } + PatternConfigurationType.EXCLUDE -> properties.filterKeys { + val containsProp = containsProp(it, relationshipPatternConfiguration.properties) + isRelationshipProperty(it) && !containsProp + } + PatternConfigurationType.INCLUDE -> properties.filterKeys { + val containsProp = containsProp(it, relationshipPatternConfiguration.properties) + isRelationshipProperty(it) && containsProp + } + } + val startConf = relationshipPatternConfiguration.start + val endConf = relationshipPatternConfiguration.end + + val start = NodePatternIngestionStrategy.toData(startConf, props) + val 
end = NodePatternIngestionStrategy.toData(endConf, props) + + mapOf("start" to start, "end" to end, "properties" to filteredProperties) + } else { + null + } + } + return if (data.isEmpty()) { + emptyList() + } else { + listOf(QueryEvents(mergeRelationshipTemplate, data)) + } + } + + private fun isRelationshipProperty(propertyName: String): Boolean { + return (!relationshipPatternConfiguration.start.keys.contains(propertyName) + && !relationshipPatternConfiguration.start.properties.contains(propertyName) + && !relationshipPatternConfiguration.end.keys.contains(propertyName) + && !relationshipPatternConfiguration.end.properties.contains(propertyName)) + } + + override fun deleteRelationshipEvents(events: Collection): List { + val data = events + .filter { it.value == null && it.key != null } + .mapNotNull { if (it.key != null) JSONUtils.asMap(it.key) else null } + .mapNotNull { props -> + val properties = props.flatten() + val containsKeys = relationshipPatternConfiguration.start.keys.all { properties.containsKey(it) } + && relationshipPatternConfiguration.end.keys.all { properties.containsKey(it) } + if (containsKeys) { + val startConf = relationshipPatternConfiguration.start + val endConf = relationshipPatternConfiguration.end + + val start = NodePatternIngestionStrategy.toData(startConf, props) + val end = NodePatternIngestionStrategy.toData(endConf, props) + + mapOf("start" to start, "end" to end) + } else { + null + } + } + return if (data.isEmpty()) { + emptyList() + } else { + listOf(QueryEvents(deleteRelationshipTemplate, data)) + } + } + +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/SchemaIngestionStrategy.kt b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/SchemaIngestionStrategy.kt new file mode 100644 index 0000000000..daaf717017 --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/SchemaIngestionStrategy.kt @@ -0,0 +1,185 @@ +package apoc.kafka.service.sink.strategy + +import apoc.kafka.events.* +import apoc.kafka.extensions.quote +import apoc.kafka.service.StreamsSinkEntity +import apoc.kafka.utils.KafkaUtil +import apoc.kafka.utils.KafkaUtil.getLabelsAsString +import apoc.kafka.utils.KafkaUtil.getNodeKeysAsString +import apoc.kafka.utils.KafkaUtil.getNodeKeys +import apoc.kafka.utils.KafkaUtil.toStreamsTransactionEvent + + +class SchemaIngestionStrategy: IngestionStrategy { + + private fun prepareRelationshipEvents(events: List, withProperties: Boolean = true): Map>> = events + .mapNotNull { + val payload = it.payload as RelationshipPayload + + val startNodeConstraints = getNodeConstraints(it) { + it.type == StreamsConstraintType.UNIQUE && payload.start.labels.orEmpty().contains(it.label) + } + val endNodeConstraints = getNodeConstraints(it) { + it.type == StreamsConstraintType.UNIQUE && payload.end.labels.orEmpty().contains(it.label) + } + + if (constraintsAreEmpty(startNodeConstraints, endNodeConstraints)) { + null + } else { + createRelationshipMetadata(payload, startNodeConstraints, endNodeConstraints, withProperties) + } + } + .groupBy { it.first } + .mapValues { it.value.map { it.second } } + + private fun createRelationshipMetadata(payload: RelationshipPayload, startNodeConstraints: List, endNodeConstraints: List, withProperties: Boolean): Pair>>? 
{ + val startNodeKeys = getNodeKeys( + labels = payload.start.labels.orEmpty(), + propertyKeys = payload.start.ids.keys, + constraints = startNodeConstraints) + val endNodeKeys = getNodeKeys( + labels = payload.end.labels.orEmpty(), + propertyKeys = payload.end.ids.keys, + constraints = endNodeConstraints) + val start = payload.start.ids.filterKeys { startNodeKeys.contains(it) } + val end = payload.end.ids.filterKeys { endNodeKeys.contains(it) } + + return if (idsAreEmpty(start, end)) { + null + } else { + val value = if (withProperties) { + val properties = payload.after?.properties ?: payload.before?.properties ?: emptyMap() + mapOf("start" to start, "end" to end, "properties" to properties) + } else { + mapOf("start" to start, "end" to end) + } + val key = RelationshipSchemaMetadata( + label = payload.label, + startLabels = payload.start.labels.orEmpty().filter { label -> startNodeConstraints.any { it.label == label } }, + endLabels = payload.end.labels.orEmpty().filter { label -> endNodeConstraints.any { it.label == label } }, + startKeys = start.keys, + endKeys = end.keys + ) + key to value + } + } + + private fun idsAreEmpty(start: Map, end: Map) = + start.isEmpty() || end.isEmpty() + + private fun constraintsAreEmpty(startNodeConstraints: List, endNodeConstraints: List) = + startNodeConstraints.isEmpty() || endNodeConstraints.isEmpty() + + override fun mergeRelationshipEvents(events: Collection): List { + return prepareRelationshipEvents(events + .mapNotNull { toStreamsTransactionEvent(it) { it.payload.type == EntityType.relationship + && it.meta.operation != OperationType.deleted } }) + .map { + val label = it.key.label.quote() + val query = """ + |${KafkaUtil.UNWIND} + |MERGE (start${getLabelsAsString(it.key.startLabels)}{${getNodeKeysAsString("start", it.key.startKeys)}}) + |MERGE (end${getLabelsAsString(it.key.endLabels)}{${getNodeKeysAsString("end", it.key.endKeys)}}) + |MERGE (start)-[r:$label]->(end) + |SET r = event.properties + """.trimMargin() + QueryEvents(query, it.value) + } + } + + override fun deleteRelationshipEvents(events: Collection): List { + return prepareRelationshipEvents(events + .mapNotNull { toStreamsTransactionEvent(it) { it.payload.type == EntityType.relationship + && it.meta.operation == OperationType.deleted } }, false) + .map { + val label = it.key.label.quote() + val query = """ + |${KafkaUtil.UNWIND} + |MATCH (start${getLabelsAsString(it.key.startLabels)}{${getNodeKeysAsString("start", it.key.startKeys)}}) + |MATCH (end${getLabelsAsString(it.key.endLabels)}{${getNodeKeysAsString("end", it.key.endKeys)}}) + |MATCH (start)-[r:$label]->(end) + |DELETE r + """.trimMargin() + QueryEvents(query, it.value) + } + } + + override fun deleteNodeEvents(events: Collection): List { + return events + .mapNotNull { toStreamsTransactionEvent(it) { it.payload.type == EntityType.node && it.meta.operation == OperationType.deleted } } + .mapNotNull { + val changeEvtBefore = it.payload.before as NodeChange + val constraints = getNodeConstraints(it) { it.type == StreamsConstraintType.UNIQUE } + if (constraints.isEmpty()) { + null + } else { + constraints to mapOf("properties" to changeEvtBefore.properties) + } + } + .groupBy({ it.first }, { it.second }) + .map { + val labels = it.key.mapNotNull { it.label } + val nodeKeys = it.key.flatMap { it.properties }.toSet() + val query = """ + |${KafkaUtil.UNWIND} + |MATCH (n${getLabelsAsString(labels)}{${getNodeKeysAsString(keys = nodeKeys)}}) + |DETACH DELETE n + """.trimMargin() + QueryEvents(query, it.value) + } + } + + override 
fun mergeNodeEvents(events: Collection): List { + val filterLabels: (List, List) -> List = { labels, constraints -> + labels.filter { label -> !constraints.any { constraint -> constraint.label == label } } + .map { it.quote() } + } + return events + .mapNotNull { toStreamsTransactionEvent(it) { it.payload.type == EntityType.node && it.meta.operation != OperationType.deleted } } + .mapNotNull { + val changeEvtAfter = it.payload.after as NodeChange + val labelsAfter = changeEvtAfter.labels ?: emptyList() + val labelsBefore = (it.payload.before as? NodeChange)?.labels.orEmpty() + + val constraints = getNodeConstraints(it) { it.type == StreamsConstraintType.UNIQUE } + if (constraints.isEmpty()) { + null + } else { + val labelsToAdd = filterLabels((labelsAfter - labelsBefore), constraints) + val labelsToDelete = filterLabels((labelsBefore - labelsAfter), constraints) + + val propertyKeys = changeEvtAfter.properties?.keys ?: emptySet() + val keys = getNodeKeys(labelsAfter, propertyKeys, constraints) + + if (keys.isEmpty()) { + null + } else { + val key = NodeSchemaMetadata(constraints = constraints, + labelsToAdd = labelsToAdd, labelsToDelete = labelsToDelete, + keys = keys) + val value = mapOf("properties" to changeEvtAfter.properties) + key to value + } + } + } + .groupBy({ it.first }, { it.second }) + .map { map -> + var query = """ + |${KafkaUtil.UNWIND} + |MERGE (n${getLabelsAsString(map.key.constraints.mapNotNull { it.label })}{${getNodeKeysAsString(keys = map.key.keys)}}) + |SET n = event.properties + """.trimMargin() + if (map.key.labelsToAdd.isNotEmpty()) { + query += "\nSET n${getLabelsAsString(map.key.labelsToAdd)}" + } + if (map.key.labelsToDelete.isNotEmpty()) { + query += "\nREMOVE n${getLabelsAsString(map.key.labelsToDelete)}" + } + QueryEvents(query, map.value) + } + } + + private fun getNodeConstraints(event: StreamsTransactionEvent, + filter: (Constraint) -> Boolean): List = event.schema.constraints.filter { filter(it) } + +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/SourceIdIngestionStrategy.kt b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/SourceIdIngestionStrategy.kt new file mode 100644 index 0000000000..ac426953ae --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/service/sink/strategy/SourceIdIngestionStrategy.kt @@ -0,0 +1,110 @@ +package apoc.kafka.service.sink.strategy + +import apoc.kafka.events.EntityType +import apoc.kafka.events.NodeChange +import apoc.kafka.events.OperationType +import apoc.kafka.events.RelationshipChange +import apoc.kafka.events.RelationshipPayload +import apoc.kafka.extensions.quote +import apoc.kafka.service.StreamsSinkEntity +import apoc.kafka.utils.KafkaUtil +import apoc.kafka.utils.KafkaUtil.getLabelsAsString +import apoc.kafka.utils.KafkaUtil.toStreamsTransactionEvent + +data class SourceIdIngestionStrategyConfig(val labelName: String = "SourceEvent", val idName: String = "sourceId") + +class SourceIdIngestionStrategy(config: SourceIdIngestionStrategyConfig = SourceIdIngestionStrategyConfig()): IngestionStrategy { + + private val quotedLabelName = config.labelName.quote() + private val quotedIdName = config.idName.quote() + + override fun mergeRelationshipEvents(events: Collection): List { + return events + .mapNotNull { toStreamsTransactionEvent(it) { it.payload.type == EntityType.relationship && it.meta.operation != OperationType.deleted } } + .map { data -> + val payload = data.payload as RelationshipPayload + val changeEvt = when (data.meta.operation) { + 
OperationType.deleted -> { + data.payload.before as RelationshipChange + } + else -> data.payload.after as RelationshipChange + } + payload.label to mapOf("id" to payload.id, + "start" to payload.start.id, "end" to payload.end.id, "properties" to changeEvt.properties) + } + .groupBy({ it.first }, { it.second }) + .map { + val query = """ + |${KafkaUtil.UNWIND} + |MERGE (start:$quotedLabelName{$quotedIdName: event.start}) + |MERGE (end:$quotedLabelName{$quotedIdName: event.end}) + |MERGE (start)-[r:${it.key.quote()}{$quotedIdName: event.id}]->(end) + |SET r = event.properties + |SET r.$quotedIdName = event.id + """.trimMargin() + QueryEvents(query, it.value) + } + } + + override fun deleteRelationshipEvents(events: Collection): List { + return events + .mapNotNull { toStreamsTransactionEvent(it) { it.payload.type == EntityType.relationship && it.meta.operation == OperationType.deleted } } + .map { data -> + val payload = data.payload as RelationshipPayload + payload.label to mapOf("id" to data.payload.id) + } + .groupBy({ it.first }, { it.second }) + .map { + val query = "${KafkaUtil.UNWIND} MATCH ()-[r:${it.key.quote()}{$quotedIdName: event.id}]-() DELETE r" + QueryEvents(query, it.value) + } + } + + override fun deleteNodeEvents(events: Collection): List { + val data = events + .mapNotNull { toStreamsTransactionEvent(it) { it.payload.type == EntityType.node && it.meta.operation == OperationType.deleted } } + .map { mapOf("id" to it.payload.id) } + if (data.isNullOrEmpty()) { + return emptyList() + } + val query = "${KafkaUtil.UNWIND} MATCH (n:$quotedLabelName{$quotedIdName: event.id}) DETACH DELETE n" + return listOf(QueryEvents(query, data)) + } + + override fun mergeNodeEvents(events: Collection): List { + return events + .mapNotNull { toStreamsTransactionEvent(it) { it.payload.type == EntityType.node && it.meta.operation != OperationType.deleted } } + .map { data -> + val changeEvtAfter = data.payload.after as NodeChange + val labelsAfter = changeEvtAfter.labels ?: emptyList() + val labelsBefore = if (data.payload.before != null) { + val changeEvtBefore = data.payload.before as NodeChange + changeEvtBefore.labels ?: emptyList() + } else { + emptyList() + } + val labelsToAdd = (labelsAfter - labelsBefore) + .toSet() + val labelsToDelete = (labelsBefore - labelsAfter) + .toSet() + NodeMergeMetadata(labelsToAdd = labelsToAdd, labelsToDelete = labelsToDelete) to mapOf("id" to data.payload.id, "properties" to changeEvtAfter.properties) + } + .groupBy({ it.first }, { it.second }) + .map { + var query = """ + |${KafkaUtil.UNWIND} + |MERGE (n:$quotedLabelName{$quotedIdName: event.id}) + |SET n = event.properties + |SET n.$quotedIdName = event.id + """.trimMargin() + if (it.key.labelsToDelete.isNotEmpty()) { + query += "\nREMOVE n${getLabelsAsString(it.key.labelsToDelete)}" + } + if (it.key.labelsToAdd.isNotEmpty()) { + query += "\nSET n${getLabelsAsString(it.key.labelsToAdd)}" + } + QueryEvents(query, it.value) + } + } + +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/utils/JSONUtils.kt b/extended/src/main/kotlin/apoc/kafka/utils/JSONUtils.kt new file mode 100644 index 0000000000..cbf167772a --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/utils/JSONUtils.kt @@ -0,0 +1,146 @@ +package apoc.kafka.utils + +import apoc.kafka.events.StreamsTransactionEvent +import apoc.kafka.events.StreamsTransactionNodeEvent +import apoc.kafka.events.StreamsTransactionRelationshipEvent +import com.fasterxml.jackson.core.JsonGenerator +import 
com.fasterxml.jackson.core.JsonProcessingException +import com.fasterxml.jackson.databind.DeserializationFeature +import com.fasterxml.jackson.databind.JsonSerializer +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.databind.SerializationFeature +import com.fasterxml.jackson.databind.SerializerProvider +import com.fasterxml.jackson.databind.module.SimpleModule +import com.fasterxml.jackson.module.kotlin.convertValue +import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper +import com.fasterxml.jackson.module.kotlin.readValue +import org.neo4j.driver.internal.value.PointValue +import org.neo4j.graphdb.spatial.Point +import org.neo4j.values.storable.CoordinateReferenceSystem +import java.io.IOException +import java.time.temporal.TemporalAccessor + +abstract class StreamsPoint { abstract val crs: String } +data class StreamsPointCartesian(override val crs: String, val x: Double, val y: Double, val z: Double? = null): StreamsPoint() +data class StreamsPointWgs(override val crs: String, val latitude: Double, val longitude: Double, val height: Double? = null): StreamsPoint() + +fun Point.toStreamsPoint(): StreamsPoint { + val crsType = this.crs.type + val coordinate = this.coordinates[0].coordinate + return when (this.crs) { + CoordinateReferenceSystem.CARTESIAN -> StreamsPointCartesian(crsType, coordinate[0], coordinate[1]) + CoordinateReferenceSystem.CARTESIAN_3D -> StreamsPointCartesian(crsType, coordinate[0], coordinate[1], coordinate[2]) + CoordinateReferenceSystem.WGS_84 -> StreamsPointWgs(crsType, coordinate[0], coordinate[1]) + CoordinateReferenceSystem.WGS_84_3D -> StreamsPointWgs(crsType, coordinate[0], coordinate[1], coordinate[2]) + else -> throw IllegalArgumentException("Point type $crsType not supported") + } +} + +fun PointValue.toStreamsPoint(): StreamsPoint { + val point = this.asPoint() + return when (val crsType = point.srid()) { + CoordinateReferenceSystem.CARTESIAN.code -> StreamsPointCartesian(CoordinateReferenceSystem.CARTESIAN.name, point.x(), point.y()) + CoordinateReferenceSystem.CARTESIAN_3D.code -> StreamsPointCartesian(CoordinateReferenceSystem.CARTESIAN_3D.name, point.x(), point.y(), point.z()) + CoordinateReferenceSystem.WGS_84.code -> StreamsPointWgs(CoordinateReferenceSystem.WGS_84.name, point.x(), point.y()) + CoordinateReferenceSystem.WGS_84_3D.code -> StreamsPointWgs(CoordinateReferenceSystem.WGS_84_3D.name, point.x(), point.y(), point.z()) + else -> throw IllegalArgumentException("Point type $crsType not supported") + } +} + +class PointSerializer : JsonSerializer() { + @Throws(IOException::class, JsonProcessingException::class) + override fun serialize(value: Point?, jgen: JsonGenerator, + provider: SerializerProvider) { + if (value == null) { + return + } + jgen.writeObject(value.toStreamsPoint()) + } +} + +class PointValueSerializer : JsonSerializer() { + @Throws(IOException::class, JsonProcessingException::class) + override fun serialize(value: PointValue?, jgen: JsonGenerator, + provider: SerializerProvider) { + if (value == null) { + return + } + jgen.writeObject(value.toStreamsPoint()) + } +} + +class TemporalAccessorSerializer : JsonSerializer() { + @Throws(IOException::class, JsonProcessingException::class) + override fun serialize(value: TemporalAccessor?, jgen: JsonGenerator, + provider: SerializerProvider) { + if (value == null) { + return + } + jgen.writeString(value.toString()) + } +} + +/** + * NOTE: it works differently from apoc.JSONUtil + */ +object JSONUtils { + + private val OBJECT_MAPPER: 
ObjectMapper = jacksonObjectMapper() + private val STRICT_OBJECT_MAPPER: ObjectMapper = jacksonObjectMapper() + + init { + val module = SimpleModule("Neo4jKafkaSerializer") + KafkaUtil.ignoreExceptions({ module.addSerializer(Point::class.java, PointSerializer()) }, NoClassDefFoundError::class.java) // in case is loaded from + KafkaUtil.ignoreExceptions({ module.addSerializer(PointValue::class.java, PointValueSerializer()) }, NoClassDefFoundError::class.java) // in case is loaded from + module.addSerializer(TemporalAccessor::class.java, TemporalAccessorSerializer()) + OBJECT_MAPPER.registerModule(module) + OBJECT_MAPPER.disable(SerializationFeature.FAIL_ON_EMPTY_BEANS) + OBJECT_MAPPER.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) + STRICT_OBJECT_MAPPER.registerModule(module) + } + + fun getObjectMapper(): ObjectMapper = OBJECT_MAPPER + + fun getStrictObjectMapper(): ObjectMapper = STRICT_OBJECT_MAPPER + + fun asMap(any: Any): Map { + return OBJECT_MAPPER.convertValue(any, Map::class.java) + .mapKeys { it.key.toString() } + } + + fun writeValueAsString(any: Any): String { + return OBJECT_MAPPER.writeValueAsString(any) + } + + fun writeValueAsBytes(any: Any): ByteArray { + return OBJECT_MAPPER.writeValueAsBytes(any) + } + + fun readValue(value: ByteArray, valueType: Class?): T { + return getObjectMapper().readValue(value, valueType) + } + + fun readValue(value: ByteArray): Any { + return getObjectMapper().readValue(value) + } + + inline fun convertValue(value: Any, objectMapper: ObjectMapper = getObjectMapper()): T { + return objectMapper.convertValue(value) + } + + fun asStreamsTransactionEvent(obj: Any): StreamsTransactionEvent { + return try { + val evt = when (obj) { + is String, is ByteArray -> STRICT_OBJECT_MAPPER.readValue(obj as ByteArray, StreamsTransactionNodeEvent::class.java) + else -> STRICT_OBJECT_MAPPER.convertValue(obj, StreamsTransactionNodeEvent::class.java) + } + evt.toStreamsTransactionEvent() + } catch (e: Exception) { + val evt = when (obj) { + is String, is ByteArray -> STRICT_OBJECT_MAPPER.readValue(obj as ByteArray, StreamsTransactionRelationshipEvent::class.java) + else -> STRICT_OBJECT_MAPPER.convertValue(obj, StreamsTransactionRelationshipEvent::class.java) + } + evt.toStreamsTransactionEvent() + } + } +} \ No newline at end of file diff --git a/extended/src/main/kotlin/apoc/kafka/utils/KafkaUtil.kt b/extended/src/main/kotlin/apoc/kafka/utils/KafkaUtil.kt new file mode 100644 index 0000000000..e809d5863b --- /dev/null +++ b/extended/src/main/kotlin/apoc/kafka/utils/KafkaUtil.kt @@ -0,0 +1,341 @@ +package apoc.kafka.utils + +import apoc.ApocConfig +import apoc.ExtendedApocConfig.APOC_KAFKA_ENABLED +import apoc.kafka.events.Constraint +import apoc.kafka.events.RelKeyStrategy +import apoc.kafka.events.StreamsConstraintType +import apoc.kafka.events.StreamsTransactionEvent +import apoc.kafka.extensions.execute +import apoc.kafka.extensions.quote +import apoc.kafka.service.StreamsSinkEntity +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.GlobalScope +import kotlinx.coroutines.delay +import kotlinx.coroutines.launch +import kotlinx.coroutines.runBlocking +import org.apache.kafka.clients.CommonClientConfigs +import org.apache.kafka.clients.admin.AdminClient +import org.apache.kafka.clients.admin.AdminClientConfig +import org.apache.kafka.clients.consumer.ConsumerConfig +import org.apache.kafka.clients.producer.ProducerConfig +import org.apache.kafka.common.config.ConfigResource +import org.apache.kafka.common.config.SaslConfigs +import 
org.apache.kafka.common.config.SslConfigs +import org.apache.kafka.common.config.TopicConfig +import org.neo4j.dbms.api.DatabaseManagementService +import org.neo4j.dbms.systemgraph.TopologyGraphDbmsModel.HostedOnMode +import org.neo4j.exceptions.UnsatisfiedDependencyException +import org.neo4j.graphdb.GraphDatabaseService +import org.neo4j.graphdb.QueryExecutionException +import org.neo4j.kernel.internal.GraphDatabaseAPI +import org.neo4j.logging.Log +import org.neo4j.logging.internal.LogService +import java.io.IOException +import java.lang.invoke.MethodHandles +import java.lang.invoke.MethodType +import java.lang.reflect.Modifier +import java.net.Socket +import java.net.URI +import java.util.* + +object KafkaUtil { + const val labelSeparator = ":" + const val keySeparator = ", " + + @JvmStatic val UNWIND: String = "UNWIND \$events AS event" + + @JvmStatic val WITH_EVENT_FROM: String = "WITH event, from" + + @JvmStatic val LEADER = "LEADER" + + @JvmStatic val SYSTEM_DATABASE_NAME = "system" + + @JvmStatic + private val coreMetadata: Class<*>? = try { + Class.forName("com.neo4j.causalclustering.core.consensus.CoreMetaData") + } catch (e: ClassNotFoundException) { + null + } + + @JvmStatic + private val isLeaderMethodHandle = coreMetadata?.let { + val lookup = MethodHandles.lookup() + lookup.findVirtual(it, "isLeader", MethodType.methodType(Boolean::class.java)) + .asType(MethodType.methodType(Boolean::class.java, Any::class.java)) + } + + fun clusterMemberRole(db: GraphDatabaseAPI): String { + val fallback: (Exception?) -> String = { e: Exception? -> + val userLog = db.dependencyResolver + .resolveDependency(LogService::class.java) + .getUserLog(KafkaUtil::class.java) + e?.let { userLog.warn("Cannot call the APIs, trying with the cypher query", e) } + ?: userLog.warn("Cannot call the APIs, trying with the cypher query") + db.execute("CALL dbms.cluster.role(\$database)", + mapOf("database" to db.databaseName()) + ) { it.columnAs("role").next() } + } + val execute = { + coreMetadata?.let { + try { + val raftMachine: Any = db.dependencyResolver.resolveDependency(coreMetadata) + val isLeader = isLeaderMethodHandle!!.invokeExact(raftMachine) as Boolean + if (isLeader) "LEADER" else "FOLLOWER" + } catch (e: UnsatisfiedDependencyException) { + "LEADER" + } + } ?: "LEADER" + } + return executeOrFallback(execute, fallback) + } + + fun isCluster(db: GraphDatabaseAPI): Boolean = db.mode() != HostedOnMode.SINGLE && db.mode() != HostedOnMode.VIRTUAL + + fun isCluster(dbms: DatabaseManagementService): Boolean = dbms.listDatabases() + .firstOrNull { it != KafkaUtil.SYSTEM_DATABASE_NAME } + ?.let { dbms.database(it) as GraphDatabaseAPI } + ?.let { isCluster(it) } ?: false + + private fun executeOrFallback(execute: () -> T, fallback: (Exception?) 
-> T): T = try { + execute() + } catch (e: Exception) { + fallback(e) + } + + fun getLabelsAsString(labels: Collection): String = labels + .map { it.quote() } + .joinToString(labelSeparator) + .let { if (it.isNotBlank()) "$labelSeparator$it" else it } + + fun getNodeKeysAsString(prefix: String = "properties", keys: Set): String = keys + .map { toQuotedProperty(prefix, it) } + .joinToString(keySeparator) + + private fun toQuotedProperty(prefix: String = "properties", property: String): String { + val quoted = property.quote() + return "$quoted: event.$prefix.$quoted" + } + + fun getNodeMergeKeys(prefix: String, keys: Set): String = keys + .map { + val quoted = it.quote() + "$quoted: event.$prefix.$quoted" + } + .joinToString(keySeparator) + + fun containsProp(key: String, properties: List): Boolean = if (key.contains(".")) { + properties.contains(key) || properties.any { key.startsWith("$it.") } + } else { + properties.contains(key) + } + + suspend fun retryForException(exceptions: Array>, retries: Int, delayTime: Long, action: () -> T): T { + return try { + action() + } catch (e: Exception) { + val isInstance = exceptions.any { it.isInstance(e) } + if (isInstance && retries > 0) { + delay(delayTime) + retryForException(exceptions = exceptions, retries = retries - 1, delayTime = delayTime, action = action) + } else { + throw e + } + } + } + + fun isServerReachable(url: String, port: Int): Boolean = try { + Socket(url, port).use { true } + } catch (e: IOException) { + false + } + + fun checkServersUnreachable(urls: String, separator: String = ","): List = urls + .split(separator) + .map { + val uri = URI.create(it) + when (uri.host.isNullOrBlank()) { + true -> { + val splitted = it.split(":") + URI("fake-scheme", "", splitted.first(), splitted.last().toInt(), + "", "", "") + } + else -> uri + } + } + .filter { uri -> !isServerReachable(uri.host, uri.port) } + .map { if (it.scheme == "fake-scheme") "${it.host}:${it.port}" else it.toString() } + + fun validateConnection(url: String, kafkaPropertyKey: String, checkReachable: Boolean = true) { + if (url.isBlank()) { + throw RuntimeException("The `kafka.$kafkaPropertyKey` property is empty") + } else if (checkReachable) { + val unreachableServers = checkServersUnreachable(url) + if (unreachableServers.isNotEmpty()) { + throw RuntimeException("The servers defined into the property `kafka.$kafkaPropertyKey` are not reachable: $unreachableServers") + } + } + } + + fun getInvalidTopicsError(invalidTopics: List) = "The BROKER config `auto.create.topics.enable` is false, the following topics need to be created into the Kafka cluster otherwise the messages will be discarded: $invalidTopics" + + fun getInvalidTopics(kafkaProps: Properties, allTopics: List): List = try { + getInvalidTopics(AdminClient.create(kafkaProps), allTopics) + } catch (e: Exception) { + emptyList() + } + + fun getInvalidTopics(client: AdminClient, allTopics: List): List = try { + val kafkaTopics = client.listTopics().names().get() + val invalidTopics = allTopics.filter { !kafkaTopics.contains(it) } + if (invalidTopics.isNotEmpty() && isAutoCreateTopicsEnabled(client)) { + emptyList() + } else { + invalidTopics + } + } catch (e: Exception) { + emptyList() + } + + fun checkEnabled() { + if (!ApocConfig.apocConfig().getBoolean(APOC_KAFKA_ENABLED)) { + throw RuntimeException("In order to use the Kafka procedures you must set ${APOC_KAFKA_ENABLED}=true") + } + } + + fun isAutoCreateTopicsEnabled(kafkaProps: Properties):Boolean = try { + 
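+        // Builds a short-lived AdminClient from the supplied properties and delegates to the
+        // AdminClient overload below; any failure (e.g. unreachable broker) is reported as `false`.
+        // Hypothetical usage sketch:
+        //   isAutoCreateTopicsEnabled(Properties().apply { put("bootstrap.servers", "localhost:9092") })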
isAutoCreateTopicsEnabled(AdminClient.create(kafkaProps)) + } catch (e: Exception) { + false + } + + fun isAutoCreateTopicsEnabled(client: AdminClient): Boolean = try { + val firstNodeId = client.describeCluster().nodes().get().first().id() + val configResources = listOf(ConfigResource(ConfigResource.Type.BROKER, firstNodeId.toString())) + val configs = client.describeConfigs(configResources).all().get() + configs.values + .flatMap { it.entries() } + .find { it.name() == "auto.create.topics.enable" } + ?.value() + ?.toBoolean() ?: false + } catch (e: Exception) { + false + } + + private fun getConfigProperties(clazz: Class<*>) = clazz.declaredFields + .filter { Modifier.isStatic(it.modifiers) && it.name.endsWith("_CONFIG") } + .map { it.get(null).toString() } + .toSet() + + private fun getBaseConfigs() = (getConfigProperties(CommonClientConfigs::class.java) + + AdminClientConfig.configNames() + + getConfigProperties(SaslConfigs::class.java) + + getConfigProperties(TopicConfig::class.java) + + getConfigProperties(SslConfigs::class.java)) + + fun getProducerProperties() = ProducerConfig.configNames() - getBaseConfigs() + + fun getConsumerProperties() = ConsumerConfig.configNames() - getBaseConfigs() + + fun getNodeKeys(labels: List, propertyKeys: Set, constraints: List, keyStrategy: RelKeyStrategy = RelKeyStrategy.DEFAULT): Set = + constraints + .filter { constraint -> + constraint.type == StreamsConstraintType.UNIQUE + && propertyKeys.containsAll(constraint.properties) + && labels.contains(constraint.label) + } + .let { + when(keyStrategy) { + RelKeyStrategy.DEFAULT -> { + // we order first by properties.size, then by label name and finally by properties name alphabetically + // with properties.sorted() we ensure that ("foo", "bar") and ("bar", "foo") are no different + // with toString() we force it.properties to have the natural sort order, that is alphabetically + it.minWithOrNull((compareBy({ it.properties.size }, { it.label }, { it.properties.sorted().toString() }))) + ?.properties + .orEmpty() + } + // with 'ALL' strategy we get a set with all properties + RelKeyStrategy.ALL -> it.flatMap { it.properties }.toSet() + } + } + + + fun toStreamsTransactionEvent(streamsSinkEntity: StreamsSinkEntity, + evaluation: (StreamsTransactionEvent) -> Boolean) + : StreamsTransactionEvent? = if (streamsSinkEntity.value != null) { + val data = JSONUtils.asStreamsTransactionEvent(streamsSinkEntity.value) + if (evaluation(data)) data else null + } else { + null + } + + fun ignoreExceptions(action: () -> T, vararg toIgnore: Class): T? 
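+    // Runs `action`, swallowing the exception (and returning null) when no classes are passed
+    // or when the thrown Throwable matches one of `toIgnore`; any other exception is rethrown.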
{ + return try { + action() + } catch (e: Throwable) { + if (toIgnore.isEmpty()) { + return null + } + return if (toIgnore.any { it.isInstance(e) }) { + null + } else { + throw e + } + } + } + + fun blockUntilFalseOrTimeout(timeout: Long, delay: Long = 1000, action: () -> Boolean): Boolean = runBlocking { + val start = System.currentTimeMillis() + var success = action() + while (System.currentTimeMillis() - start < timeout && !success) { + delay(delay) + success = action() + } + success + } + + fun getName(db: GraphDatabaseService) = db.databaseName() + + fun isWriteableInstance(db: GraphDatabaseAPI) = apoc.util.Util.isWriteableInstance(db) + + private fun clusterHasLeader(db: GraphDatabaseAPI): Boolean = try { + db.execute(""" + |CALL dbms.cluster.overview() YIELD databases + |RETURN databases[${'$'}database] AS role + """.trimMargin(), mapOf("database" to db.databaseName())) { + it.columnAs("role") + .stream() + .toList() + .contains(KafkaUtil.LEADER) + } + } catch (e: QueryExecutionException) { + if (e.statusCode.equals("Neo.ClientError.Procedure.ProcedureNotFound", ignoreCase = true)) { + false + } + throw e + } + + fun executeInWriteableInstance(db: GraphDatabaseAPI, + action: () -> T?): T? = if (isWriteableInstance(db)) { + action() + } else { + null + } + + fun isClusterCorrectlyFormed(dbms: DatabaseManagementService) = dbms.listDatabases() + .filterNot { it == KafkaUtil.SYSTEM_DATABASE_NAME } + .map { dbms.database(it) as GraphDatabaseAPI } + .all { clusterHasLeader(it) } + + fun waitForTheLeaders(dbms: DatabaseManagementService, log: Log, timeout: Long = 120000, action: () -> Unit) { + GlobalScope.launch(Dispatchers.IO) { + val start = System.currentTimeMillis() + val delay: Long = 2000 + while (!isClusterCorrectlyFormed(dbms) && System.currentTimeMillis() - start < timeout) { + log.info("${KafkaUtil.LEADER} not found, new check comes in $delay milliseconds...") + delay(delay) + } + action() + } + } +} \ No newline at end of file diff --git a/extended/src/main/resources/extendedCypher25.txt b/extended/src/main/resources/extendedCypher25.txt index 90f2de2ebb..ecb889ded9 100644 --- a/extended/src/main/resources/extendedCypher25.txt +++ b/extended/src/main/resources/extendedCypher25.txt @@ -286,4 +286,7 @@ apoc.vectordb.weaviate.getAndUpdate apoc.vectordb.weaviate.info apoc.vectordb.weaviate.query apoc.vectordb.weaviate.queryAndUpdate -apoc.vectordb.weaviate.upsert \ No newline at end of file +apoc.vectordb.weaviate.upsert +apoc.kafka.consume +apoc.kafka.publish +apoc.kafka.publish.sync \ No newline at end of file diff --git a/extended/src/main/resources/extendedCypher5.txt b/extended/src/main/resources/extendedCypher5.txt index 718df96bff..2460fedf34 100644 --- a/extended/src/main/resources/extendedCypher5.txt +++ b/extended/src/main/resources/extendedCypher5.txt @@ -276,4 +276,7 @@ apoc.vectordb.milvus.queryAndUpdate apoc.vectordb.milvus.info apoc.vectordb.custom.get apoc.vectordb.custom -apoc.vectordb.configure \ No newline at end of file +apoc.vectordb.configure +apoc.kafka.consume +apoc.kafka.publish +apoc.kafka.publish.sync \ No newline at end of file diff --git a/extended/src/test/java/apoc/util/ExtendedTestUtil.java b/extended/src/test/java/apoc/util/ExtendedTestUtil.java index 9aeb6747a5..c9c5a95b0d 100644 --- a/extended/src/test/java/apoc/util/ExtendedTestUtil.java +++ b/extended/src/test/java/apoc/util/ExtendedTestUtil.java @@ -151,6 +151,30 @@ public static void assertFails(GraphDatabaseService db, String query, Map T readValue(byte[] json, Class clazz) { + try { + return 
JsonUtil.OBJECT_MAPPER.readValue(json, clazz); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public static Object readValue(byte[] json) { + try { + return JsonUtil.OBJECT_MAPPER.readValue(json, Object.class); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + public static void clearDb(DbmsRule db) { db.executeTransactionally("MATCH (n) DETACH DELETE n"); } diff --git a/extended/src/test/kotlin/apoc/kafka/common/CommonExtensionsTest.kt b/extended/src/test/kotlin/apoc/kafka/common/CommonExtensionsTest.kt new file mode 100644 index 0000000000..141aca627a --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/CommonExtensionsTest.kt @@ -0,0 +1,74 @@ +package apoc.kafka.common + +import apoc.kafka.extensions.toMap +import org.apache.avro.SchemaBuilder +import org.apache.avro.generic.GenericRecordBuilder +import org.junit.Test +import kotlin.test.assertEquals + +class CommonExtensionsTest { + + @Test + fun `should convert AVRO record to Map`() { + // given + // this test generates a simple tree structure like this + // body + // / \ + // p ul + // | + // li + val BODY_SCHEMA = SchemaBuilder.builder("org.neo4j.example.html") + .record("BODY").fields() + .name("ul").type().array().items() + .record("UL").namespace("org.neo4j.example.html").fields() + .name("value").type().array().items() + .record("LI").namespace("org.neo4j.example.html").fields() + .optionalString("value") + .name("class").type().nullable().array().items().stringType().noDefault() + .endRecord().noDefault() + .endRecord().noDefault() + .name("p").type().array().items() + .record("P").namespace("org.neo4j.example.html").fields() + .optionalString("value") + .endRecord().noDefault() + .endRecord() + val UL_SCHEMA = BODY_SCHEMA.getField("ul").schema().elementType + val LI_SCHEMA = UL_SCHEMA.getField("value").schema().elementType + val firstLi = listOf( + GenericRecordBuilder(LI_SCHEMA).set("value", "First UL - First Element").set("class", null).build(), + GenericRecordBuilder(LI_SCHEMA).set("value", "First UL - Second Element").set("class", listOf("ClassA", "ClassB")).build() + ) + val secondLi = listOf( + GenericRecordBuilder(LI_SCHEMA).set("value", "Second UL - First Element").set("class", null).build(), + GenericRecordBuilder(LI_SCHEMA).set("value", "Second UL - Second Element").set("class", null).build() + ) + val structUL = listOf( + GenericRecordBuilder(UL_SCHEMA).set("value", firstLi).build(), + GenericRecordBuilder(UL_SCHEMA).set("value", secondLi).build() + ) + val structP = listOf( + GenericRecordBuilder(BODY_SCHEMA.getField("p").schema().elementType).set("value", "First Paragraph").build(), + GenericRecordBuilder(BODY_SCHEMA.getField("p").schema().elementType).set("value", "Second Paragraph").build() + ) + val struct = GenericRecordBuilder(BODY_SCHEMA) + .set("ul", structUL) + .set("p", structP) + .build() + + // when + val actual = struct.toMap() + + // then + val firstULMap = mapOf("value" to listOf( + mapOf("value" to "First UL - First Element", "class" to null), + mapOf("value" to "First UL - Second Element", "class" to listOf("ClassA", "ClassB")))) + val secondULMap = mapOf("value" to listOf( + mapOf("value" to "Second UL - First Element", "class" to null), + mapOf("value" to "Second UL - Second Element", "class" to null))) + val ulListMap = listOf(firstULMap, secondULMap) + val pListMap = listOf(mapOf("value" to "First Paragraph"), + mapOf("value" to "Second Paragraph")) + val bodyMap = mapOf("ul" to ulListMap, "p" to pListMap) + assertEquals(bodyMap, actual) + } 
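+
+    // The test above exercises apoc.kafka.extensions.toMap(): nested Avro GenericRecords become
+    // Kotlin Maps, Avro arrays become Lists, and null fields (e.g. "class") stay null.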
+} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/errors/KafkaErrorServiceTest.kt b/extended/src/test/kotlin/apoc/kafka/common/errors/KafkaErrorServiceTest.kt new file mode 100644 index 0000000000..754e29d7c6 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/errors/KafkaErrorServiceTest.kt @@ -0,0 +1,83 @@ +package apoc.kafka.common.errors + +import apoc.kafka.service.errors.ErrorData +import apoc.kafka.service.errors.ErrorService +import apoc.kafka.service.errors.KafkaErrorService +import org.apache.commons.lang3.exception.ExceptionUtils +import org.apache.kafka.clients.producer.MockProducer +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.clients.producer.internals.FutureRecordMetadata +import org.apache.kafka.common.record.RecordBatch +import org.apache.kafka.common.utils.SystemTime +import org.junit.Test +import org.mockito.ArgumentMatchers +import org.mockito.Mockito +import java.util.* +import java.util.concurrent.atomic.AtomicInteger +import kotlin.test.assertEquals + +class KafkaErrorServiceTest { + @Test + fun `should send the data to the DLQ`() { + val producer: MockProducer = Mockito.mock(MockProducer::class.java) as MockProducer + val counter = AtomicInteger(0) + Mockito.`when`(producer.send(ArgumentMatchers.any>())).then { + counter.incrementAndGet() + FutureRecordMetadata(null, 0, RecordBatch.NO_TIMESTAMP, 0L, 0, 0, SystemTime()) + } + val dlqService = KafkaErrorService(producer, ErrorService.ErrorConfig(fail=false,dlqTopic = "dlqTopic"), { s, e -> }) + dlqService.report(listOf(dlqData())) + assertEquals(1, counter.get()) + dlqService.close() + } + + + @Test + fun `should create the header map`() { + val producer: MockProducer = Mockito.mock(MockProducer::class.java) as MockProducer + val dlqService = KafkaErrorService(producer, ErrorService.ErrorConfig(fail=false, dlqTopic = "dlqTopic",dlqHeaders = true), { s, e -> }) + val dlqData = dlqData() + val map = dlqService.populateContextHeaders(dlqData) + assertEquals(String(map["topic"]!!), dlqData.originalTopic) + assertEquals(String(map["partition"]!!), dlqData.partition) + assertEquals(String(map["offset"]!!), dlqData.offset) + assertEquals(String(map["class.name"]!!), KafkaErrorServiceTest::class.java.name) + val exception = dlqData.exception!! + assertEquals(String(map["exception.class.name"]!!), exception::class.java.name) + assertEquals(String(map["exception.message"]!!), exception.message) + assertEquals(String(map["exception.stacktrace"]!!), ExceptionUtils.getStackTrace(exception)) + + } + + private fun dlqData(): ErrorData { + val offset = "0" + val originalTopic = "topicName" + val partition = "1" + val timestamp = System.currentTimeMillis() + val exception = RuntimeException("Test") + val key = "KEY" + val value = "VALUE" + val databaseName = "myDb" + return ErrorData( + offset = offset, + originalTopic = originalTopic, + partition = partition, + timestamp = timestamp, + exception = exception, + executingClass = KafkaErrorServiceTest::class.java, + key = key.toByteArray(), + value = value.toByteArray(), + databaseName = databaseName + ) + } + + @Test + fun `should log DQL data`() { + val log = { s:String,e:Exception? 
-> run { + RuntimeException("Test") + Unit + }} + val logService = KafkaErrorService(Properties(),ErrorService.ErrorConfig(fail = false,log=true), log) + logService.report(listOf(dlqData())) + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/strategy/CUDIngestionStrategyTest.kt b/extended/src/test/kotlin/apoc/kafka/common/strategy/CUDIngestionStrategyTest.kt new file mode 100644 index 0000000000..e2216d6d2e --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/strategy/CUDIngestionStrategyTest.kt @@ -0,0 +1,1185 @@ +package apoc.kafka.common.strategy + +import apoc.kafka.extensions.quote +import apoc.kafka.service.StreamsSinkEntity +import apoc.kafka.service.sink.strategy.CUDIngestionStrategy +import apoc.kafka.service.sink.strategy.CUDNode +import apoc.kafka.service.sink.strategy.CUDNodeRel +import apoc.kafka.service.sink.strategy.CUDOperations +import apoc.kafka.service.sink.strategy.CUDRelationship +import apoc.kafka.service.sink.strategy.QueryEvents +import apoc.kafka.utils.KafkaUtil +import org.junit.Test +import kotlin.test.assertEquals +import kotlin.test.assertTrue + +class CUDIngestionStrategyTest { + + private fun findEventByQuery(query: String, evts: List) = evts.find { it.query == query }!! + + private fun assertNodeEventsContainsKey(qe: QueryEvents, vararg keys: String) = assertTrue { + qe.events.all { + val ids = it[CUDIngestionStrategy.ID_KEY] as Map + ids.keys.containsAll(keys.toList()) + } + } + + private fun assertRelationshipEventsContainsKey(qe: QueryEvents, fromKey: String, toKey: String) = assertTrue { + qe.events.all { + val from = it["from"] as Map + val idsFrom = from[CUDIngestionStrategy.ID_KEY] as Map + val to = it["to"] as Map + val idsTo = to[CUDIngestionStrategy.ID_KEY] as Map + idsFrom.containsKey(fromKey) && idsTo.containsKey(toKey) + } + } + + @Test + fun `should create, merge and update nodes`() { + // given + val mergeMarkers = listOf(2, 5, 7) + val updateMarkers = listOf(3, 6) + val key = "key" + val list = (1..10).map { + val labels = if (it % 2 == 0) listOf("Foo", "Bar") else listOf("Foo", "Bar", "Label") + val properties = mapOf("foo" to "foo-value-$it", "id" to it) + val (op, ids) = when (it) { + in mergeMarkers -> CUDOperations.merge to mapOf(key to it) + in updateMarkers -> CUDOperations.update to mapOf(key to it) + else -> CUDOperations.create to emptyMap() + } + val cudNode = CUDNode(op = op, + labels = labels, + ids = ids, + properties = properties) + StreamsSinkEntity(null, cudNode) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), nodeDeleteEvents) + assertEquals(emptyList(), relationshipEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(6, nodeEvents.size) + assertEquals(10, nodeEvents.map { it.events.size }.sum()) + val createNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (n:Foo:Bar) + |SET n = event.properties + """.trimMargin(), nodeEvents) + assertEquals(3, createNodeFooBar.events.size) + val createNodeFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (n:Foo:Bar:Label) + |SET n = event.properties + """.trimMargin(), nodeEvents) + assertEquals(2, createNodeFooBarLabel.events.size) + val 
mergeNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (n:Foo:Bar {${key.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${key.quote()}}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(1, mergeNodeFooBar.events.size) + assertNodeEventsContainsKey(mergeNodeFooBar, key) + val mergeNodeFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (n:Foo:Bar:Label {${key.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${key.quote()}}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(2, mergeNodeFooBarLabel.events.size) + assertNodeEventsContainsKey(mergeNodeFooBarLabel, key) + val updateNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n:Foo:Bar {${key.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${key.quote()}}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(1, updateNodeFooBar.events.size) + assertNodeEventsContainsKey(updateNodeFooBar, key) + val updateNodeFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n:Foo:Bar:Label {${key.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${key.quote()}}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(1, updateNodeFooBarLabel.events.size) + assertNodeEventsContainsKey(updateNodeFooBarLabel, key) + } + + @Test + fun `should create, merge, update and delete nodes with garbage data`() { + // given + val mergeMarkers = listOf(2, 5, 7) + val updateMarkers = listOf(3, 6) + val deleteMarkers = listOf(10) + val key = "not..... SO SIMPLE!" + val list = (1..10).map { + val labels = if (it % 2 == 0) listOf("WellBehaved", "C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l") else listOf("WellBehaved", "C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l", "Label") + val properties = if (it in deleteMarkers) emptyMap() else mapOf("foo" to "foo-value-$it", "id" to it) + val (op, ids) = when (it) { + in mergeMarkers -> CUDOperations.merge to mapOf(key to it) + in updateMarkers -> CUDOperations.update to mapOf(key to it) + in deleteMarkers -> CUDOperations.delete to mapOf(key to it) + else -> CUDOperations.create to emptyMap() + } + val cudNode = CUDNode(op = op, + labels = labels, + ids = ids, + properties = properties) + StreamsSinkEntity(null, cudNode) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), relationshipEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(6, nodeEvents.size) + assertEquals(9, nodeEvents.map { it.events.size }.sum()) + val createNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l`) + |SET n = event.properties + """.trimMargin(), nodeEvents) + assertEquals(2, createNodeFooBar.events.size) + val createNodeFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l`:Label) + |SET n = event.properties + """.trimMargin(), nodeEvents) + assertEquals(2, createNodeFooBarLabel.events.size) + val mergeNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l` {`$key`: event.${CUDIngestionStrategy.ID_KEY}.`$key`}) + |SET n += event.properties + """.trimMargin(), 
nodeEvents) + assertEquals(1, mergeNodeFooBar.events.size) + assertNodeEventsContainsKey(mergeNodeFooBar, key) + val mergeNodeFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l`:Label {`$key`: event.${CUDIngestionStrategy.ID_KEY}.`$key`}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(2, mergeNodeFooBarLabel.events.size) + assertNodeEventsContainsKey(mergeNodeFooBarLabel, key) + val updateNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l` {`$key`: event.${CUDIngestionStrategy.ID_KEY}.`$key`}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(1, updateNodeFooBar.events.size) + assertNodeEventsContainsKey(updateNodeFooBar, key) + val updateNodeFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l`:Label {`$key`: event.${CUDIngestionStrategy.ID_KEY}.`$key`}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(1, updateNodeFooBarLabel.events.size) + assertNodeEventsContainsKey(updateNodeFooBarLabel, key) + + assertEquals(1, nodeDeleteEvents.size) + val nodeDeleteEvent = nodeDeleteEvents.first() + assertEquals(""" + |${KafkaUtil.UNWIND} + |MATCH (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l` {`$key`: event.${CUDIngestionStrategy.ID_KEY}.`$key`}) + |DETACH DELETE n + """.trimMargin(), nodeDeleteEvent.query) + } + + @Test + fun `should create nodes only with valid CUD operations`() { + // given + val mergeMarkers = listOf(2, 5, 7) + val invalidMarkers = listOf(3, 6, 9) + val list = (1..10).map { + val labels = listOf("Foo", "Bar", "Label") + val properties = mapOf("foo" to "foo-value-$it", "id" to it) + val (op, ids) = when (it) { + in mergeMarkers -> CUDOperations.merge to mapOf("_id" to it) + in invalidMarkers -> CUDOperations.match to mapOf("_id" to it) + else -> CUDOperations.create to emptyMap() + } + val cudNode = CUDNode(op = op, + labels = labels, + ids = ids, + properties = properties) + StreamsSinkEntity(null, cudNode) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), nodeDeleteEvents) + assertEquals(emptyList(), relationshipEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(2, nodeEvents.size) + assertEquals(7, nodeEvents.map { it.events.size }.sum()) + val createNodeFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (n:Foo:Bar:Label) + |SET n = event.properties + """.trimMargin(),nodeEvents) + assertEquals(4, createNodeFooBarLabel.events.size) + val mergeNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n) WHERE id(n) = event.ids._id + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(3, mergeNodeFooBar.events.size) + assertNodeEventsContainsKey(mergeNodeFooBar, "_id") + } + + @Test + fun `should create, merge and update relationships only with valid node operations`() { + // given + val mergeMarkers = listOf(2, 5, 7) + val invalidMarker = listOf(3, 4, 6, 9) + val key = "key" + val list = (1..10).map { + val labels = listOf("Foo", "Bar", "Label") + val properties = mapOf("foo" to 
"foo-value-$it", "id" to it) + val op = when (it) { + in mergeMarkers -> CUDOperations.merge + else -> CUDOperations.create + } + val start = CUDNodeRel(ids = mapOf(key to it), labels = labels, op= if (it in invalidMarker) CUDOperations.delete else CUDOperations.create) + val end = CUDNodeRel(ids = mapOf(key to it + 1), labels = labels) + val rel = CUDRelationship(op = op, properties = properties, from = start, to = end, rel_type = "MY_REL") + StreamsSinkEntity(null, rel) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), nodeEvents) + assertEquals(emptyList(), nodeDeleteEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(2, relationshipEvents.size) + assertEquals(6, relationshipEvents.map { it.events.size }.sum()) + val createRelFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (from:Foo:Bar:Label {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to:Foo:Bar:Label {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |CREATE (from)-[r:MY_REL]->(to) + |SET r = event.properties + """.trimMargin(), relationshipEvents) + assertEquals(3, createRelFooBarLabel.events.size) + assertRelationshipEventsContainsKey(createRelFooBarLabel, key, key) + val mergeRelFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (from:Foo:Bar:Label {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to:Foo:Bar:Label {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MERGE (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(3, mergeRelFooBarLabel.events.size) + assertRelationshipEventsContainsKey(mergeRelFooBarLabel, key, key) + } + + @Test + fun `should delete nodes with internal id reference`() { + // given + val detachMarkers = listOf(1, 3, 8, 10) + val list = (1..10).map { + val labels = if (it % 2 == 0) listOf("Foo", "Bar") else listOf("Foo", "Bar", "Label") + val detach = it in detachMarkers + val properties = emptyMap() + val cudNode = CUDNode(op = CUDOperations.delete, + labels = labels, + ids = mapOf("_id" to it), + properties = properties, + detach = detach) + StreamsSinkEntity(null, cudNode) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), nodeEvents) + assertEquals(emptyList(), relationshipEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(4, nodeDeleteEvents.size) + assertEquals(10, nodeDeleteEvents.map { it.events.size }.sum()) + val deleteNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n) WHERE id(n) = event.${CUDIngestionStrategy.ID_KEY}._id + |DELETE n + """.trimMargin(), nodeDeleteEvents) + assertEquals(3, deleteNodeFooBar.events.size) + val key = "_id" + assertNodeEventsContainsKey(deleteNodeFooBar, key) + val deleteNodeFooBarDetach = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n) 
WHERE id(n) = event.${CUDIngestionStrategy.ID_KEY}._id + |DETACH DELETE n + """.trimMargin(), nodeDeleteEvents) + assertEquals(2, deleteNodeFooBarDetach.events.size) + assertNodeEventsContainsKey(deleteNodeFooBarDetach, key) + val deleteNodeFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n) WHERE id(n) = event.${CUDIngestionStrategy.ID_KEY}._id + |DELETE n + """.trimMargin(), nodeDeleteEvents) + assertEquals(3, deleteNodeFooBarLabel.events.size) + assertNodeEventsContainsKey(deleteNodeFooBarLabel, key) + val deleteNodeFooBarLabelDetach = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n) WHERE id(n) = event.${CUDIngestionStrategy.ID_KEY}._id + |DETACH DELETE n + """.trimMargin(), nodeDeleteEvents) + assertEquals(2, deleteNodeFooBarLabelDetach.events.size) + assertNodeEventsContainsKey(deleteNodeFooBarLabelDetach, key) + } + + @Test + fun `should create, merge and update nodes with internal id reference`() { + // given + val mergeMarkers = listOf(2, 5, 7) + val updateMarkers = listOf(3, 6) + val list = (1..10).map { + val labels = if (it % 2 == 0) listOf("Foo", "Bar") else listOf("Foo", "Bar", "Label") + val properties = mapOf("foo" to "foo-value-$it", "id" to it) + val (op, ids) = when (it) { + in mergeMarkers -> CUDOperations.merge to mapOf("_id" to it) + in updateMarkers -> CUDOperations.update to mapOf("_id" to it) + else -> CUDOperations.create to emptyMap() + } + val cudNode = CUDNode(op = op, + labels = labels, + ids = ids, + properties = properties) + StreamsSinkEntity(null, cudNode) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), nodeDeleteEvents) + assertEquals(emptyList(), relationshipEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(3, nodeEvents.size) + assertEquals(10, nodeEvents.map { it.events.size }.sum()) + val createNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (n:Foo:Bar) + |SET n = event.properties + """.trimMargin(), nodeEvents) + assertEquals(3, createNodeFooBar.events.size) + val createNodeFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (n:Foo:Bar:Label) + |SET n = event.properties + """.trimMargin(),nodeEvents) + assertEquals(2, createNodeFooBarLabel.events.size) + val mergeNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n) WHERE id(n) = event.ids._id + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(5, mergeNodeFooBar.events.size) + assertNodeEventsContainsKey(mergeNodeFooBar, "_id") + } + + @Test + fun `should create, merge and update relationships`() { + // given + val mergeMarkers = listOf(2, 5, 7) + val updateMarkers = listOf(3, 6) + val key = "key" + val list = (1..10).map { + val labels = if (it % 2 == 0) listOf("Foo", "Bar") else listOf("Foo", "Bar", "Label") + val properties = mapOf("foo" to "foo-value-$it", "id" to it) + val op = when (it) { + in mergeMarkers -> CUDOperations.merge + in updateMarkers -> CUDOperations.update + else -> CUDOperations.create + } + val start = CUDNodeRel(ids = mapOf(key to it), labels = labels) + val end = CUDNodeRel(ids = mapOf(key to it + 1), labels = labels) + val rel = CUDRelationship(op = op, properties = properties, from = start, to = end, rel_type = 
"MY_REL") + StreamsSinkEntity(null, rel) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), nodeEvents) + assertEquals(emptyList(), nodeDeleteEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(6, relationshipEvents.size) + assertEquals(10, relationshipEvents.map { it.events.size }.sum()) + val createRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |CREATE (from)-[r:MY_REL]->(to) + |SET r = event.properties + """.trimMargin(), relationshipEvents) + assertEquals(3, createRelFooBar.events.size) + assertRelationshipEventsContainsKey(createRelFooBar, key, key) + val mergeRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MERGE (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(1, mergeRelFooBar.events.size) + assertRelationshipEventsContainsKey(mergeRelFooBar, key, key) + val updateRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(1, updateRelFooBar.events.size) + assertRelationshipEventsContainsKey(updateRelFooBar, key, key) + val createRelFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar:Label {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to:Foo:Bar:Label {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |CREATE (from)-[r:MY_REL]->(to) + |SET r = event.properties + """.trimMargin(), relationshipEvents) + assertEquals(2, createRelFooBarLabel.events.size) + assertRelationshipEventsContainsKey(createRelFooBarLabel, key, key) + val mergeRelFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar:Label {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to:Foo:Bar:Label {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MERGE (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(2, mergeRelFooBarLabel.events.size) + assertRelationshipEventsContainsKey(mergeRelFooBarLabel, key, key) + val updateRelFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar:Label {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (to:Foo:Bar:Label {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(1, updateRelFooBarLabel.events.size) + assertRelationshipEventsContainsKey(updateRelFooBarLabel, key, key) + } + + @Test + fun `should create and delete relationship also without properties field`() { + val key = 
"key" + val startNode = "SourceNode" + val endNode = "TargetNode" + val relType = "MY_REL" + val start = CUDNodeRel(ids = mapOf(key to 1), labels = listOf(startNode)) + val end = CUDNodeRel(ids = mapOf(key to 2), labels = listOf(endNode)) + val list = listOf(CUDOperations.create, CUDOperations.delete, CUDOperations.update).map { + val rel = CUDRelationship(op = it, from = start, to = end, rel_type = relType) + StreamsSinkEntity(null, rel) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + assertEquals(2, relationshipEvents.size) + val createRel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:$startNode {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to:$endNode {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |CREATE (from)-[r:$relType]->(to) + |SET r = event.properties + """.trimMargin(), relationshipEvents) + assertEquals(1, createRel.events.size) + val updateRel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:$startNode {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (to:$endNode {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(1, updateRel.events.size) + + assertEquals(1, relationshipDeleteEvents.size) + val deleteRel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:$startNode {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (to:$endNode {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (from)-[r:$relType]->(to) + |DELETE r + """.trimMargin(), relationshipDeleteEvents) + assertEquals(1, deleteRel.events.size) + } + + @Test + fun `should create, merge and update relationships with merge op in 'from' and 'to' node`() { + // given + val mergeMarkers = listOf(2, 5, 7) + val updateMarkers = listOf(3, 6) + val key = "key" + val list = (1..10).map { + val labels = if (it % 2 == 0) listOf("Foo", "Bar") else listOf("Foo", "Bar", "Label") + val properties = mapOf("foo" to "foo-value-$it", "id" to it) + val op = when (it) { + in mergeMarkers -> CUDOperations.merge + in updateMarkers -> CUDOperations.update + else -> CUDOperations.create + } + val start = CUDNodeRel(ids = mapOf(key to it), labels = labels, op = CUDOperations.merge) + val end = CUDNodeRel(ids = mapOf(key to it + 1), labels = labels, op = CUDOperations.merge) + val rel = CUDRelationship(op = op, properties = properties, from = start, to = end, rel_type = "MY_REL") + StreamsSinkEntity(null, rel) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), nodeEvents) + assertEquals(emptyList(), nodeDeleteEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(6, relationshipEvents.size) + assertEquals(10, relationshipEvents.map { it.events.size }.sum()) + val createRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MERGE (to:Foo:Bar {key: 
event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |CREATE (from)-[r:MY_REL]->(to) + |SET r = event.properties + """.trimMargin(), relationshipEvents) + assertEquals(3, createRelFooBar.events.size) + assertRelationshipEventsContainsKey(createRelFooBar, key, key) + val mergeRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MERGE (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MERGE (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(1, mergeRelFooBar.events.size) + assertRelationshipEventsContainsKey(mergeRelFooBar, key, key) + val updateRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(1, updateRelFooBar.events.size) + assertRelationshipEventsContainsKey(updateRelFooBar, key, key) + val createRelFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (from:Foo:Bar:Label {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MERGE (to:Foo:Bar:Label {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |CREATE (from)-[r:MY_REL]->(to) + |SET r = event.properties + """.trimMargin(), relationshipEvents) + assertEquals(2, createRelFooBarLabel.events.size) + assertRelationshipEventsContainsKey(createRelFooBarLabel, key, key) + val mergeRelFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (from:Foo:Bar:Label {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MERGE (to:Foo:Bar:Label {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MERGE (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(2, mergeRelFooBarLabel.events.size) + assertRelationshipEventsContainsKey(mergeRelFooBarLabel, key, key) + val updateRelFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar:Label {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (to:Foo:Bar:Label {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(1, updateRelFooBarLabel.events.size) + assertRelationshipEventsContainsKey(updateRelFooBarLabel, key, key) + } + + @Test + fun `should create, merge and update relationships with match op in 'from' node and merge or create in 'to' node`() { + // given + val mergeMarkers = listOf(2, 5, 7) + val updateMarkers = listOf(3, 6) + val key = "key" + val list = (1..10).map { + val labels = listOf("Foo", "Bar") + val properties = mapOf("foo" to "foo-value-$it", "id" to it) + val op = when (it) { + in mergeMarkers -> CUDOperations.merge + in updateMarkers -> CUDOperations.update + else -> CUDOperations.create + } + val start = CUDNodeRel(ids = mapOf(key to it), labels = labels, op = CUDOperations.match) + val end = CUDNodeRel(ids = mapOf(key to it + 1), labels = labels, op = if (it <= 5) CUDOperations.merge else CUDOperations.create) + val rel = CUDRelationship(op = op, properties = properties, from = start, to = end, rel_type = "MY_REL") + StreamsSinkEntity(null, rel) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = 
cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), nodeEvents) + assertEquals(emptyList(), nodeDeleteEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(6, relationshipEvents.size) + assertEquals(10, relationshipEvents.map { it.events.size }.sum()) + val createRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MERGE (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |CREATE (from)-[r:MY_REL]->(to) + |SET r = event.properties + """.trimMargin(), relationshipEvents) + assertEquals(2, createRelFooBar.events.size) + assertRelationshipEventsContainsKey(createRelFooBar, key, key) + val mergeRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MERGE (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MERGE (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(2, mergeRelFooBar.events.size) + assertRelationshipEventsContainsKey(mergeRelFooBar, key, key) + val matchMergeAndMergeRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MERGE (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MERGE (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(2, matchMergeAndMergeRelFooBar.events.size) + assertRelationshipEventsContainsKey(matchMergeAndMergeRelFooBar, key, key) + val matchMergeAndCreateRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |CREATE (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MERGE (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(1, matchMergeAndCreateRelFooBar.events.size) + assertRelationshipEventsContainsKey(matchMergeAndCreateRelFooBar, key, key) + val updateRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(1, updateRelFooBar.events.size) + assertRelationshipEventsContainsKey(updateRelFooBar, key, key) + val mergeRelFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |CREATE (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |CREATE (from)-[r:MY_REL]->(to) + |SET r = event.properties + """.trimMargin(), relationshipEvents) + assertEquals(3, mergeRelFooBarLabel.events.size) + assertRelationshipEventsContainsKey(mergeRelFooBarLabel, key, key) + } + + @Test + fun `should create, merge and update relationships with match op in 'to' node and merge or create in 'from' node`() { + // given + val mergeMarkers = listOf(2, 5, 7) + val 
updateMarkers = listOf(3, 6) + val key = "key" + val list = (1..10).map { + val labels = listOf("Foo", "Bar") + val properties = mapOf("foo" to "foo-value-$it", "id" to it) + val op = when (it) { + in mergeMarkers -> CUDOperations.merge + in updateMarkers -> CUDOperations.update + else -> CUDOperations.create + } + val start = CUDNodeRel(ids = mapOf(key to it), labels = labels, op = if (it <= 5) CUDOperations.merge else CUDOperations.create) + val end = CUDNodeRel(ids = mapOf(key to it + 1), labels = labels, CUDOperations.match) + val rel = CUDRelationship(op = op, properties = properties, from = start, to = end, rel_type = "MY_REL") + StreamsSinkEntity(null, rel) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), nodeEvents) + assertEquals(emptyList(), nodeDeleteEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(6, relationshipEvents.size) + assertEquals(10, relationshipEvents.map { it.events.size }.sum()) + val createRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |CREATE (from)-[r:MY_REL]->(to) + |SET r = event.properties + """.trimMargin(), relationshipEvents) + assertEquals(2, createRelFooBar.events.size) + assertRelationshipEventsContainsKey(createRelFooBar, key, key) + val mergeRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MERGE (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(2, mergeRelFooBar.events.size) + assertRelationshipEventsContainsKey(mergeRelFooBar, key, key) + val matchMergeAndMergeRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MERGE (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(2, matchMergeAndMergeRelFooBar.events.size) + assertRelationshipEventsContainsKey(matchMergeAndMergeRelFooBar, key, key) + val matchMergeAndCreateRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MERGE (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(1, matchMergeAndCreateRelFooBar.events.size) + assertRelationshipEventsContainsKey(matchMergeAndCreateRelFooBar, key, key) + val updateRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(1, 
updateRelFooBar.events.size) + assertRelationshipEventsContainsKey(updateRelFooBar, key, key) + val mergeRelFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |CREATE (from)-[r:MY_REL]->(to) + |SET r = event.properties + """.trimMargin(), relationshipEvents) + assertEquals(3, mergeRelFooBarLabel.events.size) + assertRelationshipEventsContainsKey(mergeRelFooBarLabel, key, key) + } + + @Test + fun `should delete relationships`() { + // given + val key = "key" + val list = (1..10).map { + val labels = if (it % 2 == 0) listOf("Foo", "Bar") else listOf("Foo", "Bar", "Label") + val properties = emptyMap() + val start = CUDNodeRel(ids = mapOf(key to it), labels = labels) + val end = CUDNodeRel(ids = mapOf(key to it + 1), labels = labels) + val rel = CUDRelationship(op = CUDOperations.delete, properties = properties, from = start, to = end, rel_type = "MY_REL") + StreamsSinkEntity(null, rel) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), nodeEvents) + assertEquals(emptyList(), nodeDeleteEvents) + assertEquals(emptyList(), relationshipEvents) + + assertEquals(2, relationshipDeleteEvents.size) + assertEquals(10, relationshipDeleteEvents.map { it.events.size }.sum()) + val deleteRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (to:Foo:Bar {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (from)-[r:MY_REL]->(to) + |DELETE r + """.trimMargin(), relationshipDeleteEvents) + assertEquals(5, deleteRelFooBar.events.size) + assertRelationshipEventsContainsKey(deleteRelFooBar, key, key) + val deleteRelFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from:Foo:Bar:Label {key: event.from.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (to:Foo:Bar:Label {key: event.to.${CUDIngestionStrategy.ID_KEY}.key}) + |MATCH (from)-[r:MY_REL]->(to) + |DELETE r + """.trimMargin(), relationshipDeleteEvents) + assertEquals(5, deleteRelFooBarLabel.events.size) + assertRelationshipEventsContainsKey(deleteRelFooBarLabel, key, key) + } + + @Test + fun `should delete relationships with internal id reference`() { + // given + val key = "_id" + val list = (1..10).map { + val labels = if (it % 2 == 0) listOf("Foo", "Bar") else listOf("Foo", "Bar", "Label") + val properties = emptyMap() + val start = CUDNodeRel(ids = mapOf(key to it), labels = labels) + val relKey = if (it % 2 == 0) key else "key" + val end = CUDNodeRel(ids = mapOf(relKey to it + 1), labels = labels) + val rel = CUDRelationship(op = CUDOperations.delete, properties = properties, from = start, to = end, rel_type = "MY_REL") + StreamsSinkEntity(null, rel) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), 
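These tests build CUDRelationship and CUDNodeRel instances directly; on a real sink topic the same event would arrive as JSON. A hedged sketch of that payload, assuming the wire format mirrors the data-class property names used in these tests (op, rel_type, from, to, properties, with ids/labels/op on each endpoint); the procedures documentation added in this PR is the authoritative reference:

// Hypothetical CUD relationship message; field names assumed from the test data classes.
val cudRelationshipJson = """
{
  "op": "delete",
  "rel_type": "MY_REL",
  "from": { "ids": { "key": 1 }, "labels": ["Foo", "Bar"] },
  "to":   { "ids": { "key": 2 }, "labels": ["Foo", "Bar"] },
  "properties": {}
}
""".trimIndent()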
nodeEvents) + assertEquals(emptyList(), nodeDeleteEvents) + assertEquals(emptyList(), relationshipEvents) + + assertEquals(2, relationshipDeleteEvents.size) + assertEquals(10, relationshipDeleteEvents.map { it.events.size }.sum()) + val deleteRel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from) WHERE id(from) = event.from.${CUDIngestionStrategy.ID_KEY}._id + |MATCH (to) WHERE id(to) = event.to.${CUDIngestionStrategy.ID_KEY}._id + |MATCH (from)-[r:MY_REL]->(to) + |DELETE r + """.trimMargin(), relationshipDeleteEvents) + assertEquals(5, deleteRel.events.size) + assertRelationshipEventsContainsKey(deleteRel, key, key) + val relKey = "key" + val deleteRelFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from) WHERE id(from) = event.from.${CUDIngestionStrategy.ID_KEY}._id + |MATCH (to:Foo:Bar:Label {$relKey: event.to.${CUDIngestionStrategy.ID_KEY}.$relKey}) + |MATCH (from)-[r:MY_REL]->(to) + |DELETE r + """.trimMargin(), relationshipDeleteEvents) + assertEquals(5, deleteRelFooBarLabel.events.size) + assertRelationshipEventsContainsKey(deleteRelFooBarLabel, key, relKey) + } + + @Test + fun `should create, merge and update relationships with internal id reference`() { + // given + val mergeMarkers = listOf(2, 5, 7) + val updateMarkers = listOf(3, 6) + val list = (1..10).map { + val labels = if (it % 2 == 0) listOf("Foo", "Bar") else listOf("Foo", "Bar", "Label") + val properties = mapOf("foo" to "foo-value-$it", "id" to it) + val op = when (it) { + in mergeMarkers -> CUDOperations.merge + in updateMarkers -> CUDOperations.update + else -> CUDOperations.create + } + val start = CUDNodeRel(ids = mapOf("_id" to it), labels = labels) + val end = CUDNodeRel(ids = mapOf("_id" to it + 1), labels = labels) + val rel = CUDRelationship(op = op, properties = properties, from = start, to = end, rel_type = "MY_REL") + StreamsSinkEntity(null, rel) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), nodeEvents) + assertEquals(emptyList(), nodeDeleteEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(3, relationshipEvents.size) + assertEquals(10, relationshipEvents.map { it.events.size }.sum()) + val createRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from) WHERE id(from) = event.from.${CUDIngestionStrategy.ID_KEY}._id + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to) WHERE id(to) = event.to.${CUDIngestionStrategy.ID_KEY}._id + |CREATE (from)-[r:MY_REL]->(to) + |SET r = event.properties + """.trimMargin(), relationshipEvents) + assertEquals(5, createRelFooBar.events.size) + val key = "_id" + assertRelationshipEventsContainsKey(createRelFooBar, key, key) + val mergeRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from) WHERE id(from) = event.from.${CUDIngestionStrategy.ID_KEY}._id + |${KafkaUtil.WITH_EVENT_FROM} + |MATCH (to) WHERE id(to) = event.to.${CUDIngestionStrategy.ID_KEY}._id + |MERGE (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(3, mergeRelFooBar.events.size) + assertRelationshipEventsContainsKey(mergeRelFooBar, key, key) + val updateRelFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (from) WHERE id(from) = 
event.from.${CUDIngestionStrategy.ID_KEY}._id + |MATCH (to) WHERE id(to) = event.to.${CUDIngestionStrategy.ID_KEY}._id + |MATCH (from)-[r:MY_REL]->(to) + |SET r += event.properties + """.trimMargin(), relationshipEvents) + assertEquals(2, updateRelFooBar.events.size) + assertRelationshipEventsContainsKey(updateRelFooBar, key, key) + } + + @Test + fun `should create, merge, update and delete nodes with compound keys`() { + // given + val mergeMarkers = listOf(2, 5, 7) + val updateMarkers = listOf(3, 6) + val deleteMarkers = listOf(10) + val firstKey = "not..... SO SIMPLE!" + val secondKey = "otherKey" + val list = (1..10).map { + val labels = if (it % 2 == 0) listOf("WellBehaved", "C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l") else listOf("WellBehaved", "C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l", "Label") + val properties = if (it in deleteMarkers) emptyMap() else mapOf("foo" to "foo-value-$it", "id" to it) + val (op, ids) = when (it) { + in mergeMarkers -> CUDOperations.merge to mapOf(firstKey to it, secondKey to it) + in updateMarkers -> CUDOperations.update to mapOf(firstKey to it, secondKey to it) + in deleteMarkers -> CUDOperations.delete to mapOf(firstKey to it, secondKey to it) + else -> CUDOperations.create to emptyMap() + } + val cudNode = CUDNode(op = op, + labels = labels, + ids = ids, + properties = properties) + StreamsSinkEntity(null, cudNode) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), relationshipEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(6, nodeEvents.size) + assertEquals(9, nodeEvents.map { it.events.size }.sum()) + val createNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l`) + |SET n = event.properties + """.trimMargin(), nodeEvents) + assertEquals(2, createNodeFooBar.events.size) + val createNodeFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l`:Label) + |SET n = event.properties + """.trimMargin(), nodeEvents) + assertEquals(2, createNodeFooBarLabel.events.size) + + val mergeNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l` {${firstKey.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${firstKey.quote()}, ${secondKey.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${secondKey.quote()}}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(1, mergeNodeFooBar.events.size) + assertNodeEventsContainsKey(mergeNodeFooBar, firstKey, secondKey) + + val mergeNodeFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l`:Label {${firstKey.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${firstKey.quote()}, ${secondKey.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${secondKey.quote()}}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(2, mergeNodeFooBarLabel.events.size) + assertNodeEventsContainsKey(mergeNodeFooBarLabel, firstKey, secondKey) + + val updateNodeFooBar = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l` {${firstKey.quote()}: 
event.${CUDIngestionStrategy.ID_KEY}.${firstKey.quote()}, ${secondKey.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${secondKey.quote()}}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(1, updateNodeFooBar.events.size) + assertNodeEventsContainsKey(updateNodeFooBar, firstKey, secondKey) + + val updateNodeFooBarLabel = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l`:Label {${firstKey.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${firstKey.quote()}, ${secondKey.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${secondKey.quote()}}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(1, updateNodeFooBarLabel.events.size) + assertNodeEventsContainsKey(updateNodeFooBarLabel, firstKey, secondKey) + + assertEquals(1, nodeDeleteEvents.size) + val nodeDeleteEvent = nodeDeleteEvents.first() + assertEquals(""" + |${KafkaUtil.UNWIND} + |MATCH (n:WellBehaved:`C̸r̵a̵z̵y̵ ̶.̵ ̶ ̴ ̸ ̶ ̶ ̵ ̴L̴a̵b̸e̶l` {${firstKey.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${firstKey.quote()}, ${secondKey.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${secondKey.quote()}}) + |DETACH DELETE n + """.trimMargin(), nodeDeleteEvent.query) + assertEquals(1, nodeDeleteEvent.events.size) + assertNodeEventsContainsKey(updateNodeFooBar, firstKey, secondKey) + } + + @Test + fun `should create, merge, update and delete nodes without labels`() { + // given + val mergeMarkers = listOf(2, 5, 7) + val updateMarkers = listOf(3, 6) + val deleteMarkers = listOf(10) + val key = "key" + val list = (1..10).map { + val labels = emptyList() + val properties = if (it in deleteMarkers) emptyMap() else mapOf("foo" to "foo-value-$it", "id" to it) + val (op, ids) = when (it) { + in mergeMarkers -> CUDOperations.merge to mapOf(key to it) + in updateMarkers -> CUDOperations.update to mapOf(key to it) + in deleteMarkers -> CUDOperations.delete to mapOf(key to it) + else -> CUDOperations.create to emptyMap() + } + val cudNode = CUDNode(op = op, + labels = labels, + ids = ids, + properties = properties) + StreamsSinkEntity(null, cudNode) + } + + // when + val cudQueryStrategy = CUDIngestionStrategy() + val nodeEvents = cudQueryStrategy.mergeNodeEvents(list) + val nodeDeleteEvents = cudQueryStrategy.deleteNodeEvents(list) + + val relationshipEvents = cudQueryStrategy.mergeRelationshipEvents(list) + val relationshipDeleteEvents = cudQueryStrategy.deleteRelationshipEvents(list) + + // then + assertEquals(emptyList(), relationshipEvents) + assertEquals(emptyList(), relationshipDeleteEvents) + + assertEquals(3, nodeEvents.size) + assertEquals(9, nodeEvents.map { it.events.size }.sum()) + val createNode = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |CREATE (n) + |SET n = event.properties + """.trimMargin(), nodeEvents) + assertEquals(4, createNode.events.size) + + val mergeNode = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MERGE (n {${key.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${key.quote()}}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(3, mergeNode.events.size) + assertNodeEventsContainsKey(mergeNode, key) + + val updateNode = findEventByQuery(""" + |${KafkaUtil.UNWIND} + |MATCH (n {${key.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${key.quote()}}) + |SET n += event.properties + """.trimMargin(), nodeEvents) + assertEquals(2, updateNode.events.size) + assertNodeEventsContainsKey(updateNode, key) + + assertEquals(1, nodeDeleteEvents.size) + val nodeDeleteEvent = nodeDeleteEvents.first() + 
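The compound-key test above expects labels and keys that are not plain identifiers to come out backtick-quoted, while WellBehaved and Label stay bare. A sketch of the rule that behaviour implies, written as a hypothetical helper rather than the project's quote() extension:

// Hypothetical: quote an identifier only when it is not a plain Cypher name.
fun quoteIfNeeded(identifier: String): String =
    if (identifier.matches(Regex("[A-Za-z_][A-Za-z0-9_]*"))) identifier
    else "`" + identifier.replace("`", "``") + "`"

// quoteIfNeeded("otherKey") returns otherKey unchanged,
// quoteIfNeeded("not..... SO SIMPLE!") returns it wrapped in backticks.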
assertEquals(""" + |${KafkaUtil.UNWIND} + |MATCH (n {${key.quote()}: event.${CUDIngestionStrategy.ID_KEY}.${key.quote()}}) + |DETACH DELETE n + """.trimMargin(), nodeDeleteEvent.query) + assertEquals(1, nodeDeleteEvent.events.size) + assertNodeEventsContainsKey(updateNode, key) + } + +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/strategy/NodePatternIngestionStrategyTest.kt b/extended/src/test/kotlin/apoc/kafka/common/strategy/NodePatternIngestionStrategyTest.kt new file mode 100644 index 0000000000..78ab9d1795 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/strategy/NodePatternIngestionStrategyTest.kt @@ -0,0 +1,196 @@ +package apoc.kafka.common.strategy + +import org.junit.Test +import apoc.kafka.service.StreamsSinkEntity +import apoc.kafka.service.sink.strategy.NodePatternConfiguration +import apoc.kafka.service.sink.strategy.NodePatternIngestionStrategy +import apoc.kafka.utils.KafkaUtil +import kotlin.test.assertEquals + +class NodePatternIngestionStrategyTest { + + @Test + fun `should get all properties`() { + // given + val config = NodePatternConfiguration.parse("(:LabelA:LabelB{!id})") + val strategy = NodePatternIngestionStrategy(config) + val data = mapOf("id" to 1, "foo" to "foo", "bar" to "bar", "foobar" to "foobar") + + // when + val events = listOf(StreamsSinkEntity(data, data)) + val queryEvents = strategy.mergeNodeEvents(events) + + // then + assertEquals(""" + |${KafkaUtil.UNWIND} + |MERGE (n:LabelA:LabelB{id: event.keys.id}) + |SET n = event.properties + |SET n += event.keys + """.trimMargin(), queryEvents[0].query) + assertEquals(listOf(mapOf("keys" to mapOf("id" to 1), + "properties" to mapOf("foo" to "foo", "bar" to "bar", "foobar" to "foobar")) + ), + queryEvents[0].events) + assertEquals(emptyList(), strategy.deleteNodeEvents(events)) + assertEquals(emptyList(), strategy.deleteRelationshipEvents(events)) + assertEquals(emptyList(), strategy.mergeRelationshipEvents(events)) + } + + @Test + fun `should get nested properties`() { + // given + val config = NodePatternConfiguration.parse("(:LabelA:LabelB{!id, foo.bar})") + val strategy = NodePatternIngestionStrategy(config) + val data = mapOf("id" to 1, "foo" to mapOf("bar" to "bar", "foobar" to "foobar")) + + // when + val events = listOf(StreamsSinkEntity(data, data)) + val queryEvents = strategy.mergeNodeEvents(events) + + // then + assertEquals(1, queryEvents.size) + assertEquals(""" + |${KafkaUtil.UNWIND} + |MERGE (n:LabelA:LabelB{id: event.keys.id}) + |SET n = event.properties + |SET n += event.keys + """.trimMargin(), + queryEvents[0].query) + assertEquals(listOf(mapOf("keys" to mapOf("id" to 1), + "properties" to mapOf("foo.bar" to "bar"))), + queryEvents[0].events) + assertEquals(emptyList(), strategy.deleteNodeEvents(events)) + assertEquals(emptyList(), strategy.deleteRelationshipEvents(events)) + assertEquals(emptyList(), strategy.mergeRelationshipEvents(events)) + } + + @Test + fun `should exclude nested properties`() { + // given + val config = NodePatternConfiguration.parse("(:LabelA:LabelB{!id, -foo})") + val strategy = NodePatternIngestionStrategy(config) + val map = mapOf("id" to 1, "foo" to mapOf("bar" to "bar", "foobar" to "foobar"), "prop" to 100) + + // when + val events = listOf(StreamsSinkEntity(map, map)) + val queryEvents = strategy.mergeNodeEvents(events) + + // then + assertEquals(1, queryEvents.size) + assertEquals(""" + |${KafkaUtil.UNWIND} + |MERGE (n:LabelA:LabelB{id: event.keys.id}) + |SET n = event.properties + |SET n += event.keys + 
""".trimMargin(), + queryEvents[0].query) + assertEquals(listOf(mapOf("keys" to mapOf("id" to 1), + "properties" to mapOf("prop" to 100))), + queryEvents[0].events) + assertEquals(emptyList(), strategy.deleteNodeEvents(events)) + assertEquals(emptyList(), strategy.deleteRelationshipEvents(events)) + assertEquals(emptyList(), strategy.mergeRelationshipEvents(events)) + } + + @Test + fun `should include nested properties`() { + // given + val config = NodePatternConfiguration.parse("(:LabelA:LabelB{!id, foo})") + val strategy = NodePatternIngestionStrategy(config) + val data = mapOf("id" to 1, "foo" to mapOf("bar" to "bar", "foobar" to "foobar"), "prop" to 100) + + // when + val events = listOf(StreamsSinkEntity(data, data)) + val queryEvents = strategy.mergeNodeEvents(events) + + // then + assertEquals(1, queryEvents.size) + assertEquals(""" + |${KafkaUtil.UNWIND} + |MERGE (n:LabelA:LabelB{id: event.keys.id}) + |SET n = event.properties + |SET n += event.keys + """.trimMargin(), + queryEvents[0].query) + assertEquals(listOf(mapOf("keys" to mapOf("id" to 1), + "properties" to mapOf("foo.bar" to "bar", "foo.foobar" to "foobar"))), + queryEvents[0].events) + assertEquals(emptyList(), strategy.deleteNodeEvents(events)) + assertEquals(emptyList(), strategy.deleteRelationshipEvents(events)) + assertEquals(emptyList(), strategy.mergeRelationshipEvents(events)) + } + + @Test + fun `should exclude the properties`() { + // given + val config = NodePatternConfiguration.parse("(:LabelA:LabelB{!id,-foo,-bar})") + val strategy = NodePatternIngestionStrategy(config) + val data = mapOf("id" to 1, "foo" to "foo", "bar" to "bar", "foobar" to "foobar") + + // when + val events = listOf(StreamsSinkEntity(data, data)) + val queryEvents = strategy.mergeNodeEvents(events) + + // then + assertEquals(1, queryEvents.size) + assertEquals(""" + |${KafkaUtil.UNWIND} + |MERGE (n:LabelA:LabelB{id: event.keys.id}) + |SET n = event.properties + |SET n += event.keys + """.trimMargin(), queryEvents[0].query) + assertEquals(listOf(mapOf("keys" to mapOf("id" to 1), "properties" to mapOf("foobar" to "foobar"))), queryEvents[0].events) + assertEquals(emptyList(), strategy.deleteNodeEvents(events)) + assertEquals(emptyList(), strategy.deleteRelationshipEvents(events)) + assertEquals(emptyList(), strategy.mergeRelationshipEvents(events)) + } + + @Test + fun `should include the properties`() { + // given + val config = NodePatternConfiguration.parse("(:LabelA:LabelB{!id,foo,bar})") + val strategy = NodePatternIngestionStrategy(config) + val data = mapOf("id" to 1, "foo" to "foo", "bar" to "bar", "foobar" to "foobar") + + // when + val events = listOf(StreamsSinkEntity(data, data)) + val queryEvents = strategy.mergeNodeEvents(events) + + // then + assertEquals(""" + |${KafkaUtil.UNWIND} + |MERGE (n:LabelA:LabelB{id: event.keys.id}) + |SET n = event.properties + |SET n += event.keys + """.trimMargin(), queryEvents[0].query) + assertEquals(listOf(mapOf("keys" to mapOf("id" to 1), "properties" to mapOf("foo" to "foo", "bar" to "bar"))), queryEvents[0].events) + assertEquals(emptyList(), strategy.deleteNodeEvents(events)) + assertEquals(emptyList(), strategy.deleteRelationshipEvents(events)) + assertEquals(emptyList(), strategy.mergeRelationshipEvents(events)) + } + + @Test + fun `should delete the node`() { + // given + val config = NodePatternConfiguration.parse("(:LabelA:LabelB{!id})") + val strategy = NodePatternIngestionStrategy(config) + val data = mapOf("id" to 1, "foo" to "foo", "bar" to "bar", "foobar" to "foobar") + + // when 
+ val events = listOf(StreamsSinkEntity(data, null)) + val queryEvents = strategy.deleteNodeEvents(events) + + // then + assertEquals(""" + |${KafkaUtil.UNWIND} + |MATCH (n:LabelA:LabelB{id: event.keys.id}) + |DETACH DELETE n + """.trimMargin(), queryEvents[0].query) + assertEquals(listOf(mapOf("keys" to mapOf("id" to 1))), + queryEvents[0].events) + assertEquals(emptyList(), strategy.mergeNodeEvents(events)) + assertEquals(emptyList(), strategy.deleteRelationshipEvents(events)) + assertEquals(emptyList(), strategy.mergeRelationshipEvents(events)) + } + +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/strategy/PatternConfigurationTest.kt b/extended/src/test/kotlin/apoc/kafka/common/strategy/PatternConfigurationTest.kt new file mode 100644 index 0000000000..ccbf862c78 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/strategy/PatternConfigurationTest.kt @@ -0,0 +1,492 @@ +package apoc.kafka.common.strategy + +import apoc.kafka.service.sink.strategy.NodePatternConfiguration +import apoc.kafka.service.sink.strategy.PatternConfigurationType +import apoc.kafka.service.sink.strategy.RelationshipPatternConfiguration +import org.junit.Test +import kotlin.test.assertEquals + +class NodePatternConfigurationTest { + + @Test + fun `should extract all params`() { + // given + val pattern = "(:LabelA:LabelB{!id,*})" + + // when + val result = NodePatternConfiguration.parse(pattern) + + // then + val expected = NodePatternConfiguration(keys = setOf("id"), type = PatternConfigurationType.ALL, + labels = listOf("LabelA", "LabelB"), properties = emptyList()) + assertEquals(expected, result) + } + + @Test + fun `should extract all fixed params`() { + // given + val pattern = "(:LabelA{!id,foo,bar})" + + // when + val result = NodePatternConfiguration.parse(pattern) + + // then + val expected = NodePatternConfiguration(keys = setOf("id"), type = PatternConfigurationType.INCLUDE, + labels = listOf("LabelA"), properties = listOf("foo", "bar")) + assertEquals(expected, result) + } + + @Test + fun `should extract complex params`() { + // given + val pattern = "(:LabelA{!id,foo.bar})" + + // when + val result = NodePatternConfiguration.parse(pattern) + + // then + val expected = NodePatternConfiguration(keys = setOf("id"), type = PatternConfigurationType.INCLUDE, + labels = listOf("LabelA"), properties = listOf("foo.bar")) + assertEquals(expected, result) + } + + @Test + fun `should extract composite keys with fixed params`() { + // given + val pattern = "(:LabelA{!idA,!idB,foo,bar})" + + // when + val result = NodePatternConfiguration.parse(pattern) + + // then + val expected = NodePatternConfiguration(keys = setOf("idA", "idB"), type = PatternConfigurationType.INCLUDE, + labels = listOf("LabelA"), properties = listOf("foo", "bar")) + assertEquals(expected, result) + } + + @Test + fun `should extract all excluded params`() { + // given + val pattern = "(:LabelA{!id,-foo,-bar})" + + // when + val result = NodePatternConfiguration.parse(pattern) + + // then + val expected = NodePatternConfiguration(keys = setOf("id"), type = PatternConfigurationType.EXCLUDE, + labels = listOf("LabelA"), properties = listOf("foo", "bar")) + assertEquals(expected, result) + } + + @Test(expected = IllegalArgumentException::class) + fun `should throw an exception because of mixed configuration`() { + // given + val pattern = "(:LabelA{!id,-foo,bar})" + + try { + // when + NodePatternConfiguration.parse(pattern) + } catch (e: IllegalArgumentException) { + // then + assertEquals("The 
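Taken together, the pattern strings exercised in the ingestion tests above and in the parsing tests that follow form a small extraction DSL: a leading ! marks a key property, * keeps every remaining property, a - prefix excludes a property, and dotted names such as foo.bar pull values out of nested maps. A usage sketch that only restates the expectations of the node-pattern tests above:

val config = NodePatternConfiguration.parse("(:LabelA:LabelB{!id, foo.bar})")
val strategy = NodePatternIngestionStrategy(config)
// For data = {"id": 1, "foo": {"bar": "bar", "foobar": "foobar"}} the strategy emits
// keys       = {"id": 1}
// properties = {"foo.bar": "bar"}
// together with the MERGE query asserted in `should get nested properties`.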
Node pattern $pattern is not homogeneous", e.message) + throw e + } + } + + @Test(expected = IllegalArgumentException::class) + fun `should throw an exception because of invalid pattern`() { + // given + val pattern = "(LabelA{!id,-foo,bar})" + + try { + // when + NodePatternConfiguration.parse(pattern) + } catch (e: IllegalArgumentException) { + // then + assertEquals("The Node pattern $pattern is invalid", e.message) + throw e + } + } + + @Test(expected = IllegalArgumentException::class) + fun `should throw an exception because the pattern should contains a key`() { + // given + val pattern = "(:LabelA{id,-foo,bar})" + + try { + // when + NodePatternConfiguration.parse(pattern) + } catch (e: IllegalArgumentException) { + // then + assertEquals("The Node pattern $pattern must contains at lest one key", e.message) + throw e + } + } + + @Test + fun `should extract all params - simple`() { + // given + val pattern = "LabelA:LabelB{!id,*}" + + // when + val result = NodePatternConfiguration.parse(pattern) + + // then + val expected = NodePatternConfiguration(keys = setOf("id"), type = PatternConfigurationType.ALL, + labels = listOf("LabelA", "LabelB"), properties = emptyList()) + assertEquals(expected, result) + } + + @Test + fun `should extract all fixed params - simple`() { + // given + val pattern = "LabelA{!id,foo,bar}" + + // when + val result = NodePatternConfiguration.parse(pattern) + + // then + val expected = NodePatternConfiguration(keys = setOf("id"), type = PatternConfigurationType.INCLUDE, + labels = listOf("LabelA"), properties = listOf("foo", "bar")) + assertEquals(expected, result) + } + + @Test + fun `should extract complex params - simple`() { + // given + val pattern = "LabelA{!id,foo.bar}" + + // when + val result = NodePatternConfiguration.parse(pattern) + + // then + val expected = NodePatternConfiguration(keys = setOf("id"), type = PatternConfigurationType.INCLUDE, + labels = listOf("LabelA"), properties = listOf("foo.bar")) + assertEquals(expected, result) + } + + @Test + fun `should extract composite keys with fixed params - simple`() { + // given + val pattern = "LabelA{!idA,!idB,foo,bar}" + + // when + val result = NodePatternConfiguration.parse(pattern) + + // then + val expected = NodePatternConfiguration(keys = setOf("idA", "idB"), type = PatternConfigurationType.INCLUDE, + labels = listOf("LabelA"), properties = listOf("foo", "bar")) + assertEquals(expected, result) + } + + @Test + fun `should extract all excluded params - simple`() { + // given + val pattern = "LabelA{!id,-foo,-bar}" + + // when + val result = NodePatternConfiguration.parse(pattern) + + // then + val expected = NodePatternConfiguration(keys = setOf("id"), type = PatternConfigurationType.EXCLUDE, + labels = listOf("LabelA"), properties = listOf("foo", "bar")) + assertEquals(expected, result) + } + + @Test(expected = IllegalArgumentException::class) + fun `should throw an exception because of mixed configuration - simple`() { + // given + val pattern = "LabelA{!id,-foo,bar}" + + try { + // when + NodePatternConfiguration.parse(pattern) + } catch (e: IllegalArgumentException) { + // then + assertEquals("The Node pattern $pattern is not homogeneous", e.message) + throw e + } + } + + @Test(expected = IllegalArgumentException::class) + fun `should throw an exception because the pattern should contains a key - simple`() { + // given + val pattern = "LabelA{id,-foo,bar}" + + try { + // when + NodePatternConfiguration.parse(pattern) + } catch (e: IllegalArgumentException) { + // then + assertEquals("The 
Node pattern $pattern must contains at lest one key", e.message) + throw e + } + } +} + +class RelationshipPatternConfigurationTest { + + @Test + fun `should extract all params`() { + // given + val startPattern = "LabelA{!id,aa}" + val endPattern = "LabelB{!idB,bb}" + val pattern = "(:$startPattern)-[:REL_TYPE]->(:$endPattern)" + + // when + val result = RelationshipPatternConfiguration.parse(pattern) + + // then + val start = NodePatternConfiguration.parse(startPattern) + val end = NodePatternConfiguration.parse(endPattern) + val properties = emptyList() + val relType = "REL_TYPE" + val expected = RelationshipPatternConfiguration(start = start, end = end, relType = relType, + properties = properties, type = PatternConfigurationType.ALL + ) + assertEquals(expected, result) + } + + @Test + fun `should extract all params with reverse source and target`() { + // given + val startPattern = "LabelA{!id,aa}" + val endPattern = "LabelB{!idB,bb}" + val pattern = "(:$startPattern)<-[:REL_TYPE]-(:$endPattern)" + + // when + val result = RelationshipPatternConfiguration.parse(pattern) + + // then + val start = NodePatternConfiguration.parse(startPattern) + val end = NodePatternConfiguration.parse(endPattern) + val properties = emptyList() + val relType = "REL_TYPE" + val expected = RelationshipPatternConfiguration(start = end, end = start, relType = relType, + properties = properties, type = PatternConfigurationType.ALL + ) + assertEquals(expected, result) + } + + @Test + fun `should extract all fixed params`() { + // given + val startPattern = "LabelA{!id}" + val endPattern = "LabelB{!idB}" + val pattern = "(:$startPattern)-[:REL_TYPE{foo, BAR}]->(:$endPattern)" + + // when + val result = RelationshipPatternConfiguration.parse(pattern) + + // then + val start = RelationshipPatternConfiguration.getNodeConf(startPattern) + val end = RelationshipPatternConfiguration.getNodeConf(endPattern) + val properties = listOf("foo", "BAR") + val relType = "REL_TYPE" + val expected = RelationshipPatternConfiguration(start = start, end = end, relType = relType, + properties = properties, type = PatternConfigurationType.INCLUDE + ) + assertEquals(expected, result) + } + + @Test + fun `should extract complex params`() { + // given + val startPattern = "LabelA{!id}" + val endPattern = "LabelB{!idB}" + val pattern = "(:$startPattern)-[:REL_TYPE{foo.BAR, BAR.foo}]->(:$endPattern)" + + // when + val result = RelationshipPatternConfiguration.parse(pattern) + + // then + val start = RelationshipPatternConfiguration.getNodeConf(startPattern) + val end = RelationshipPatternConfiguration.getNodeConf(endPattern) + val properties = listOf("foo.BAR", "BAR.foo") + val relType = "REL_TYPE" + val expected = RelationshipPatternConfiguration(start = start, end = end, relType = relType, + properties = properties, type = PatternConfigurationType.INCLUDE + ) + assertEquals(expected, result) + } + + @Test + fun `should extract all excluded params`() { + // given + val startPattern = "LabelA{!id}" + val endPattern = "LabelB{!idB}" + val pattern = "(:$startPattern)-[:REL_TYPE{-foo, -BAR}]->(:$endPattern)" + + // when + val result = RelationshipPatternConfiguration.parse(pattern) + + // then + val start = RelationshipPatternConfiguration.getNodeConf(startPattern) + val end = RelationshipPatternConfiguration.getNodeConf(endPattern) + val properties = listOf("foo", "BAR") + val relType = "REL_TYPE" + val expected = RelationshipPatternConfiguration(start = start, end = end, relType = relType, + properties = properties, type = 
PatternConfigurationType.EXCLUDE + ) + assertEquals(expected, result) + } + + @Test(expected = IllegalArgumentException::class) + fun `should throw an exception because of mixed configuration`() { + // given + val pattern = "(:LabelA{!id})-[:REL_TYPE{foo, -BAR}]->(:LabelB{!idB})" + + try { + // when + RelationshipPatternConfiguration.parse(pattern) + } catch (e: IllegalArgumentException) { + // then + assertEquals("The Relationship pattern $pattern is not homogeneous", e.message) + throw e + } + } + + @Test(expected = IllegalArgumentException::class) + fun `should throw an exception because the pattern should contains nodes with only ids`() { + // given + val pattern = "(:LabelA{id})-[:REL_TYPE{foo,BAR}]->(:LabelB{!idB})" + + try { + // when + RelationshipPatternConfiguration.parse(pattern) + } catch (e: IllegalArgumentException) { + // then + assertEquals("The Relationship pattern $pattern is invalid", e.message) + throw e + } + } + + @Test(expected = IllegalArgumentException::class) + fun `should throw an exception because the pattern is invalid`() { + // given + val pattern = "(LabelA{!id})-[:REL_TYPE{foo,BAR}]->(:LabelB{!idB})" + + try { + // when + RelationshipPatternConfiguration.parse(pattern) + } catch (e: IllegalArgumentException) { + // then + assertEquals("The Relationship pattern $pattern is invalid", e.message) + throw e + } + } + + @Test + fun `should extract all params - simple`() { + // given + val startPattern = "LabelA{!id,aa}" + val endPattern = "LabelB{!idB,bb}" + val pattern = "$startPattern REL_TYPE $endPattern" + + // when + val result = RelationshipPatternConfiguration.parse(pattern) + + // then + val start = NodePatternConfiguration.parse(startPattern) + val end = NodePatternConfiguration.parse(endPattern) + val properties = emptyList() + val relType = "REL_TYPE" + val expected = RelationshipPatternConfiguration(start = start, end = end, relType = relType, + properties = properties, type = PatternConfigurationType.ALL + ) + assertEquals(expected, result) + } + + @Test + fun `should extract all fixed params - simple`() { + // given + val startPattern = "LabelA{!id}" + val endPattern = "LabelB{!idB}" + val pattern = "$startPattern REL_TYPE{foo, BAR} $endPattern" + + // when + val result = RelationshipPatternConfiguration.parse(pattern) + + // then + val start = RelationshipPatternConfiguration.getNodeConf(startPattern) + val end = RelationshipPatternConfiguration.getNodeConf(endPattern) + val properties = listOf("foo", "BAR") + val relType = "REL_TYPE" + val expected = RelationshipPatternConfiguration(start = start, end = end, relType = relType, + properties = properties, type = PatternConfigurationType.INCLUDE + ) + assertEquals(expected, result) + } + + @Test + fun `should extract complex params - simple`() { + // given + val startPattern = "LabelA{!id}" + val endPattern = "LabelB{!idB}" + val pattern = "$startPattern REL_TYPE{foo.BAR, BAR.foo} $endPattern" + + // when + val result = RelationshipPatternConfiguration.parse(pattern) + + // then + val start = RelationshipPatternConfiguration.getNodeConf(startPattern) + val end = RelationshipPatternConfiguration.getNodeConf(endPattern) + val properties = listOf("foo.BAR", "BAR.foo") + val relType = "REL_TYPE" + val expected = RelationshipPatternConfiguration(start = start, end = end, relType = relType, + properties = properties, type = PatternConfigurationType.INCLUDE + ) + assertEquals(expected, result) + } + + @Test + fun `should extract all excluded params - simple`() { + // given + val startPattern = "LabelA{!id}" 
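RelationshipPatternConfiguration accepts the same pattern in two spellings, a Cypher-like arrow form and a compact whitespace form, and normalises direction so that start is always the node the arrow leaves. A short sketch restating the parsing tests above; the two calls are expected to be equivalent:

val cypherLike = RelationshipPatternConfiguration.parse("(:LabelA{!id})-[:REL_TYPE{foo, BAR}]->(:LabelB{!idB})")
val compact    = RelationshipPatternConfiguration.parse("LabelA{!id} REL_TYPE{foo, BAR} LabelB{!idB}")
// Both yield relType = "REL_TYPE" and an INCLUDE configuration over [foo, BAR];
// a reversed arrow, (:LabelB{!idB})<-[:REL_TYPE]-(:LabelA{!id}), still parses with LabelA as start.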
+ val endPattern = "LabelB{!idB}" + val pattern = "$startPattern REL_TYPE{-foo, -BAR} $endPattern" + + // when + val result = RelationshipPatternConfiguration.parse(pattern) + + // then + val start = RelationshipPatternConfiguration.getNodeConf(startPattern) + val end = RelationshipPatternConfiguration.getNodeConf(endPattern) + val properties = listOf("foo", "BAR") + val relType = "REL_TYPE" + val expected = RelationshipPatternConfiguration(start = start, end = end, relType = relType, + properties = properties, type = PatternConfigurationType.EXCLUDE + ) + assertEquals(expected, result) + } + + @Test(expected = IllegalArgumentException::class) + fun `should throw an exception because of mixed configuration - simple`() { + // given + val pattern = "LabelA{!id} REL_TYPE{foo, -BAR} LabelB{!idB}" + + try { + // when + RelationshipPatternConfiguration.parse(pattern) + } catch (e: IllegalArgumentException) { + // then + assertEquals("The Relationship pattern $pattern is not homogeneous", e.message) + throw e + } + } + + @Test(expected = IllegalArgumentException::class) + fun `should throw an exception because the pattern should contains nodes with only ids - simple`() { + // given + val pattern = "LabelA{id} REL_TYPE{foo,BAR} LabelB{!idB}" + + try { + // when + RelationshipPatternConfiguration.parse(pattern) + } catch (e: IllegalArgumentException) { + // then + assertEquals("The Relationship pattern $pattern is invalid", e.message) + throw e + } + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/strategy/RelationshipPatternIngestionStrategyTest.kt b/extended/src/test/kotlin/apoc/kafka/common/strategy/RelationshipPatternIngestionStrategyTest.kt new file mode 100644 index 0000000000..de0ef4fb3a --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/strategy/RelationshipPatternIngestionStrategyTest.kt @@ -0,0 +1,196 @@ +package apoc.kafka.common.strategy + +import org.junit.Test +import apoc.kafka.service.StreamsSinkEntity +import apoc.kafka.service.sink.strategy.RelationshipPatternConfiguration +import apoc.kafka.service.sink.strategy.RelationshipPatternIngestionStrategy +import apoc.kafka.utils.KafkaUtil +import kotlin.test.assertEquals + +class RelationshipPatternIngestionStrategyTest { + + @Test + fun `should get all properties`() { + // given + val startPattern = "LabelA{!idStart}" + val endPattern = "LabelB{!idEnd}" + val pattern = "(:$startPattern)-[:REL_TYPE]->(:$endPattern)" + val config = RelationshipPatternConfiguration.parse(pattern) + val strategy = RelationshipPatternIngestionStrategy(config) + val data = mapOf("idStart" to 1, "idEnd" to 2, + "foo" to "foo", + "bar" to "bar") + + // when + val events = listOf(StreamsSinkEntity(data, data)) + val queryEvents = strategy.mergeRelationshipEvents(events) + + // then + assertEquals(1, queryEvents.size) + assertEquals(""" + |${KafkaUtil.UNWIND} + |MERGE (start:LabelA{idStart: event.start.keys.idStart}) + |SET start = event.start.properties + |SET start += event.start.keys + |MERGE (end:LabelB{idEnd: event.end.keys.idEnd}) + |SET end = event.end.properties + |SET end += event.end.keys + |MERGE (start)-[r:REL_TYPE]->(end) + |SET r = event.properties + """.trimMargin(), queryEvents[0].query) + assertEquals(listOf(mapOf("start" to mapOf("keys" to mapOf("idStart" to 1), "properties" to emptyMap()), + "end" to mapOf("keys" to mapOf("idEnd" to 2), "properties" to emptyMap()), + "properties" to mapOf("foo" to "foo", "bar" to "bar"))), queryEvents[0].events) + assertEquals(emptyList(), 
strategy.deleteNodeEvents(events)) + assertEquals(emptyList(), strategy.deleteRelationshipEvents(events)) + assertEquals(emptyList(), strategy.mergeNodeEvents(events)) + } + + @Test + fun `should get all properties - simple`() { + // given + val startPattern = "LabelA{!idStart}" + val endPattern = "LabelB{!idEnd}" + val pattern = "$startPattern REL_TYPE $endPattern" + val config = RelationshipPatternConfiguration.parse(pattern) + val strategy = RelationshipPatternIngestionStrategy(config) + val data = mapOf("idStart" to 1, "idEnd" to 2, + "foo" to "foo", + "bar" to "bar") + + // when + val events = listOf(StreamsSinkEntity(data, data)) + val queryEvents = strategy.mergeRelationshipEvents(events) + + // then + assertEquals(1, queryEvents.size) + assertEquals(""" + |${KafkaUtil.UNWIND} + |MERGE (start:LabelA{idStart: event.start.keys.idStart}) + |SET start = event.start.properties + |SET start += event.start.keys + |MERGE (end:LabelB{idEnd: event.end.keys.idEnd}) + |SET end = event.end.properties + |SET end += event.end.keys + |MERGE (start)-[r:REL_TYPE]->(end) + |SET r = event.properties + """.trimMargin(), queryEvents[0].query) + assertEquals(listOf(mapOf("start" to mapOf("keys" to mapOf("idStart" to 1), "properties" to emptyMap()), + "end" to mapOf("keys" to mapOf("idEnd" to 2), "properties" to emptyMap()), + "properties" to mapOf("foo" to "foo", "bar" to "bar"))), queryEvents[0].events) + assertEquals(emptyList(), strategy.deleteNodeEvents(events)) + assertEquals(emptyList(), strategy.deleteRelationshipEvents(events)) + assertEquals(emptyList(), strategy.mergeNodeEvents(events)) + } + + @Test + fun `should get all properties with reverse start-end`() { + // given + val startPattern = "LabelA{!idStart}" + val endPattern = "LabelB{!idEnd}" + val pattern = "(:$endPattern)<-[:REL_TYPE]-(:$startPattern)" + val config = RelationshipPatternConfiguration.parse(pattern) + val strategy = RelationshipPatternIngestionStrategy(config) + val data = mapOf("idStart" to 1, "idEnd" to 2, + "foo" to "foo", + "bar" to "bar") + + // when + val events = listOf(StreamsSinkEntity(data, data)) + val queryEvents = strategy.mergeRelationshipEvents(events) + + // then + assertEquals(1, queryEvents.size) + assertEquals(""" + |${KafkaUtil.UNWIND} + |MERGE (start:LabelA{idStart: event.start.keys.idStart}) + |SET start = event.start.properties + |SET start += event.start.keys + |MERGE (end:LabelB{idEnd: event.end.keys.idEnd}) + |SET end = event.end.properties + |SET end += event.end.keys + |MERGE (start)-[r:REL_TYPE]->(end) + |SET r = event.properties + """.trimMargin(), queryEvents[0].query) + assertEquals(listOf(mapOf("start" to mapOf("keys" to mapOf("idStart" to 1), "properties" to emptyMap()), + "end" to mapOf("keys" to mapOf("idEnd" to 2), "properties" to emptyMap()), + "properties" to mapOf("foo" to "foo", "bar" to "bar"))), queryEvents[0].events) + assertEquals(emptyList(), strategy.deleteNodeEvents(events)) + assertEquals(emptyList(), strategy.deleteRelationshipEvents(events)) + assertEquals(emptyList(), strategy.mergeNodeEvents(events)) + } + + @Test + fun `should get nested properties`() { + // given + val startPattern = "LabelA{!idStart, foo.mapFoo}" + val endPattern = "LabelB{!idEnd, bar.mapBar}" + val pattern = "(:$startPattern)-[:REL_TYPE]->(:$endPattern)" + val config = RelationshipPatternConfiguration.parse(pattern) + val strategy = RelationshipPatternIngestionStrategy(config) + val data = mapOf("idStart" to 1, "idEnd" to 2, + "foo" to mapOf("mapFoo" to "mapFoo"), + "bar" to mapOf("mapBar" to "mapBar"), 
+ "rel" to 1, + "map" to mapOf("a" to "a", "inner" to mapOf("b" to "b"))) + + // when + val events = listOf(StreamsSinkEntity(data, data)) + val queryEvents = strategy.mergeRelationshipEvents(events) + + // then + assertEquals(1, queryEvents.size) + assertEquals(""" + |${KafkaUtil.UNWIND} + |MERGE (start:LabelA{idStart: event.start.keys.idStart}) + |SET start = event.start.properties + |SET start += event.start.keys + |MERGE (end:LabelB{idEnd: event.end.keys.idEnd}) + |SET end = event.end.properties + |SET end += event.end.keys + |MERGE (start)-[r:REL_TYPE]->(end) + |SET r = event.properties + """.trimMargin(), queryEvents[0].query) + assertEquals(listOf( + mapOf("start" to mapOf("keys" to mapOf("idStart" to 1), "properties" to mapOf("foo.mapFoo" to "mapFoo")), + "end" to mapOf("keys" to mapOf("idEnd" to 2), "properties" to mapOf("bar.mapBar" to "mapBar")), + "properties" to mapOf("rel" to 1, "map.a" to "a", "map.inner.b" to "b")) + ), queryEvents[0].events) + assertEquals(emptyList(), strategy.deleteNodeEvents(events)) + assertEquals(emptyList(), strategy.deleteRelationshipEvents(events)) + assertEquals(emptyList(), strategy.mergeNodeEvents(events)) + } + + @Test + fun `should delete the relationship`() { + // given + val startPattern = "LabelA{!idStart}" + val endPattern = "LabelB{!idEnd}" + val pattern = "(:$startPattern)-[:REL_TYPE]->(:$endPattern)" + val config = RelationshipPatternConfiguration.parse(pattern) + val strategy = RelationshipPatternIngestionStrategy(config) + val data = mapOf("idStart" to 1, "idEnd" to 2, + "foo" to "foo", + "bar" to "bar") + + // when + val events = listOf(StreamsSinkEntity(data, null)) + val queryEvents = strategy.deleteRelationshipEvents(events) + + // then + assertEquals(1, queryEvents.size) + assertEquals(""" + |${KafkaUtil.UNWIND} + |MATCH (start:LabelA{idStart: event.start.keys.idStart}) + |MATCH (end:LabelB{idEnd: event.end.keys.idEnd}) + |MATCH (start)-[r:REL_TYPE]->(end) + |DELETE r + """.trimMargin(), queryEvents[0].query) + assertEquals(listOf(mapOf("start" to mapOf("keys" to mapOf("idStart" to 1), "properties" to emptyMap()), + "end" to mapOf("keys" to mapOf("idEnd" to 2), "properties" to emptyMap()))), queryEvents[0].events) + assertEquals(emptyList(), strategy.deleteNodeEvents(events)) + assertEquals(emptyList(), strategy.mergeRelationshipEvents(events)) + assertEquals(emptyList(), strategy.mergeNodeEvents(events)) + } + +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/strategy/SchemaIngestionStrategyTest.kt b/extended/src/test/kotlin/apoc/kafka/common/strategy/SchemaIngestionStrategyTest.kt new file mode 100644 index 0000000000..3f9503cbf0 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/strategy/SchemaIngestionStrategyTest.kt @@ -0,0 +1,496 @@ +package apoc.kafka.common.strategy + +import org.junit.Test +import apoc.kafka.events.* +import apoc.kafka.service.StreamsSinkEntity +import apoc.kafka.service.sink.strategy.SchemaIngestionStrategy +import apoc.kafka.utils.KafkaUtil +import kotlin.test.assertEquals +import kotlin.test.assertTrue + +class SchemaIngestionStrategyTest { + + @Test + fun `should create the Schema Query Strategy for mixed events`() { + // given + val constraints = listOf(Constraint(label = "User", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("name", "surname"))) + val nodeSchema = Schema(properties = mapOf("name" to "String", "surname" to "String", "comp@ny" to "String"), constraints = constraints) + val relSchema = Schema(properties = mapOf("since" to 
"Long"), constraints = constraints) + val cdcDataStart = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 0, + txEventsCount = 3, + operation = OperationType.created + ), + payload = NodePayload(id = "0", + before = null, + after = NodeChange(properties = mapOf("name" to "Andrea", "surname" to "Santurbano", "comp@ny" to "LARUS-BA"), labels = listOf("User")) + ), + schema = nodeSchema + ) + val cdcDataEnd = StreamsTransactionEvent(meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 1, + txEventsCount = 3, + operation = OperationType.created + ), + payload = NodePayload(id = "1", + before = null, + after = NodeChange(properties = mapOf("name" to "Michael", "surname" to "Hunger", "comp@ny" to "Neo4j"), labels = listOf("User")) + ), + schema = nodeSchema + ) + val cdcDataRelationship = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 2, + txEventsCount = 3, + operation = OperationType.created + ), + payload = RelationshipPayload( + id = "2", + start = RelationshipNodeChange(id = "0", labels = listOf("User", "NewLabel"), ids = mapOf("name" to "Andrea", "surname" to "Santurbano")), + end = RelationshipNodeChange(id = "1", labels = listOf("User", "NewLabel"), ids = mapOf("name" to "Michael", "surname" to "Hunger")), + after = RelationshipChange(properties = mapOf("since" to 2014)), + before = null, + label = "KNOWS WHO" + ), + schema = relSchema + ) + val cdcQueryStrategy = SchemaIngestionStrategy() + val txEvents = listOf(StreamsSinkEntity(cdcDataStart, cdcDataStart), + StreamsSinkEntity(cdcDataEnd, cdcDataEnd), + StreamsSinkEntity(cdcDataRelationship, cdcDataRelationship)) + + // when + val nodeEvents = cdcQueryStrategy.mergeNodeEvents(txEvents) + val nodeDeleteEvents = cdcQueryStrategy.deleteNodeEvents(txEvents) + + val relationshipEvents = cdcQueryStrategy.mergeRelationshipEvents(txEvents) + val relationshipDeleteEvents = cdcQueryStrategy.deleteRelationshipEvents(txEvents) + + // then + assertEquals(0, nodeDeleteEvents.size) + assertEquals(1, nodeEvents.size) + val nodeQuery = nodeEvents[0].query + val expectedNodeQuery = """ + |${KafkaUtil.UNWIND} + |MERGE (n:User{surname: event.properties.surname, name: event.properties.name}) + |SET n = event.properties + """.trimMargin() + assertEquals(expectedNodeQuery, nodeQuery.trimIndent()) + val eventsNodeList = nodeEvents[0].events + assertEquals(2, eventsNodeList.size) + val expectedNodeEvents = listOf( + mapOf("properties" to mapOf("name" to "Andrea", "surname" to "Santurbano", "comp@ny" to "LARUS-BA")), + mapOf("properties" to mapOf("name" to "Michael", "surname" to "Hunger", "comp@ny" to "Neo4j")) + ) + assertEquals(expectedNodeEvents, eventsNodeList) + + assertEquals(0, relationshipDeleteEvents.size) + assertEquals(1, relationshipEvents.size) + val relQuery = relationshipEvents[0].query + val expectedRelQuery = """ + |${KafkaUtil.UNWIND} + |MERGE (start:User{name: event.start.name, surname: event.start.surname}) + |MERGE (end:User{name: event.end.name, surname: event.end.surname}) + |MERGE (start)-[r:`KNOWS WHO`]->(end) + |SET r = event.properties + """.trimMargin() + assertEquals(expectedRelQuery, relQuery.trimIndent()) + val eventsRelList = relationshipEvents[0].events + assertEquals(1, eventsRelList.size) + val expectedRelEvents = listOf( + mapOf("start" to mapOf("name" to "Andrea", "surname" to "Santurbano"), + "end" to mapOf("name" to 
"Michael", "surname" to "Hunger"), "properties" to mapOf("since" to 2014)) + ) + assertEquals(expectedRelEvents, eventsRelList) + } + + @Test + fun `should create the Schema Query Strategy for nodes`() { + // given + val nodeSchema = Schema(properties = mapOf("name" to "String", "surname" to "String", "comp@ny" to "String"), + constraints = listOf(Constraint(label = "User", type = StreamsConstraintType.UNIQUE, properties = setOf("name", "surname")))) + val cdcDataStart = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 0, + txEventsCount = 3, + operation = OperationType.updated + ), + payload = NodePayload(id = "0", + before = NodeChange(properties = mapOf("name" to "Andrea", "surname" to "Santurbano"), labels = listOf("User", "ToRemove")), + after = NodeChange(properties = mapOf("name" to "Andrea", "surname" to "Santurbano", "comp@ny" to "LARUS-BA"), labels = listOf("User", "NewLabel")) + ), + schema = nodeSchema + ) + val cdcDataEnd = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 1, + txEventsCount = 3, + operation = OperationType.updated + ), + payload = NodePayload(id = "1", + before = NodeChange(properties = mapOf("name" to "Michael", "surname" to "Hunger"), labels = listOf("User", "ToRemove")), + after = NodeChange(properties = mapOf("name" to "Michael", "surname" to "Hunger", "comp@ny" to "Neo4j"), labels = listOf("User", "NewLabel")) + ), + schema = nodeSchema + ) + val cdcQueryStrategy = SchemaIngestionStrategy() + val txEvents = listOf( + StreamsSinkEntity(cdcDataStart, cdcDataStart), + StreamsSinkEntity(cdcDataEnd, cdcDataEnd)) + + // when + val nodeEvents = cdcQueryStrategy.mergeNodeEvents(txEvents) + val nodeDeleteEvents = cdcQueryStrategy.deleteNodeEvents(txEvents) + + // then + assertEquals(0, nodeDeleteEvents.size) + assertEquals(1, nodeEvents.size) + val nodeQuery = nodeEvents[0].query + val expectedNodeQuery = """ + |${KafkaUtil.UNWIND} + |MERGE (n:User{surname: event.properties.surname, name: event.properties.name}) + |SET n = event.properties + |SET n:NewLabel + |REMOVE n:ToRemove + """.trimMargin() + assertEquals(expectedNodeQuery, nodeQuery.trimIndent()) + val eventsNodeList = nodeEvents[0].events + assertEquals(2, eventsNodeList.size) + val expectedNodeEvents = listOf( + mapOf("properties" to mapOf("name" to "Andrea", "surname" to "Santurbano", "comp@ny" to "LARUS-BA")), + mapOf("properties" to mapOf("name" to "Michael", "surname" to "Hunger", "comp@ny" to "Neo4j")) + ) + assertEquals(expectedNodeEvents, eventsNodeList) + } + + @Test + fun `should create the Schema Query Strategy for relationships`() { + // given + val relSchema = Schema(properties = mapOf("since" to "Long"), constraints = listOf( + Constraint(label = "User Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("name", "surname")), + Constraint(label = "Product Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("name")))) + val cdcDataRelationship = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 2, + txEventsCount = 3, + operation = OperationType.updated + ), + payload = RelationshipPayload( + id = "2", + start = RelationshipNodeChange(id = "1", labels = listOf("User Ext", "NewLabel"), ids = mapOf("name" to "Michael", "surname" to "Hunger")), + end = RelationshipNodeChange(id = "2", labels = listOf("Product Ext", "NewLabelA"), ids = 
mapOf("name" to "My Awesome Product")), + after = RelationshipChange(properties = mapOf("since" to 2014)), + before = null, + label = "HAS BOUGHT" + ), + schema = relSchema + ) + val cdcQueryStrategy = SchemaIngestionStrategy() + val txEvents = listOf(StreamsSinkEntity(cdcDataRelationship, cdcDataRelationship)) + + // when + val relationshipEvents = cdcQueryStrategy.mergeRelationshipEvents(txEvents) + val relationshipDeleteEvents = cdcQueryStrategy.deleteRelationshipEvents(txEvents) + + // then + assertEquals(0, relationshipDeleteEvents.size) + assertEquals(1, relationshipEvents.size) + val relQuery = relationshipEvents[0].query + val expectedRelQuery = """ + |${KafkaUtil.UNWIND} + |MERGE (start:`User Ext`{name: event.start.name, surname: event.start.surname}) + |MERGE (end:`Product Ext`{name: event.end.name}) + |MERGE (start)-[r:`HAS BOUGHT`]->(end) + |SET r = event.properties + """.trimMargin() + assertEquals(expectedRelQuery, relQuery.trimIndent()) + val eventsRelList = relationshipEvents[0].events + assertEquals(1, eventsRelList.size) + val expectedRelEvents = listOf( + mapOf("start" to mapOf("name" to "Michael", "surname" to "Hunger"), + "end" to mapOf("name" to "My Awesome Product"), + "properties" to mapOf("since" to 2014)) + ) + assertEquals(expectedRelEvents, eventsRelList) + } + + @Test + fun `should create the Schema Query Strategy for relationships with multiple unique constraints`() { + // the Schema Query Strategy leverage the first constraint with lowest properties + // with the same size, we take the first sorted properties list alphabetically + + // given + // we shuffle the constraints to ensure that the result doesn't depend from the ordering + val constraintsList = listOf( + Constraint(label = "User Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("address")), + Constraint(label = "User Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("country")), + Constraint(label = "User Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("name", "surname")), + Constraint(label = "User Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("profession", "another_one")), + Constraint(label = "Product Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("code")), + Constraint(label = "Product Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("name")) + ).shuffled() + + val relSchema = Schema(properties = mapOf("since" to "Long"), constraints = constraintsList) + val idsStart = mapOf("name" to "Sherlock", + "surname" to "Holmes", + "country" to "UK", + "profession" to "detective", + "another_one" to "foo", + "address" to "Baker Street") + val idsEnd = mapOf("name" to "My Awesome Product", "code" to 17294) + + val cdcDataRelationship = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 2, + txEventsCount = 3, + operation = OperationType.updated + ), + payload = RelationshipPayload( + id = "2", + start = RelationshipNodeChange(id = "1", labels = listOf("User Ext", "NewLabel"), ids = idsStart), + end = RelationshipNodeChange(id = "2", labels = listOf("Product Ext", "NewLabelA"), ids = idsEnd), + after = RelationshipChange(properties = mapOf("since" to 2014)), + before = null, + label = "HAS BOUGHT" + ), + schema = relSchema + ) + val cdcQueryStrategy = SchemaIngestionStrategy() + val txEvents = listOf(StreamsSinkEntity(cdcDataRelationship, cdcDataRelationship)) + + // when + val relationshipEvents = 
cdcQueryStrategy.mergeRelationshipEvents(txEvents) + val relationshipDeleteEvents = cdcQueryStrategy.deleteRelationshipEvents(txEvents) + + // then + assertEquals(0, relationshipDeleteEvents.size) + assertEquals(1, relationshipEvents.size) + val relQuery = relationshipEvents[0].query + val expectedRelQuery = """ + |${KafkaUtil.UNWIND} + |MERGE (start:`User Ext`{address: event.start.address}) + |MERGE (end:`Product Ext`{code: event.end.code}) + |MERGE (start)-[r:`HAS BOUGHT`]->(end) + |SET r = event.properties + """.trimMargin() + assertEquals(expectedRelQuery, relQuery.trimIndent()) + val eventsRelList = relationshipEvents[0].events + assertEquals(1, eventsRelList.size) + val expectedRelEvents = listOf( + mapOf("start" to mapOf("address" to "Baker Street"), + "end" to mapOf("code" to 17294), + "properties" to mapOf("since" to 2014)) + ) + assertEquals(expectedRelEvents, eventsRelList) + } + + @Test + fun `should create the Schema Query Strategy for relationships with multiple unique constraints and labels`() { + // the Schema Query Strategy leverage the first constraint with lowest properties + // with the same size, we take the first label in alphabetical order + // finally, with same label name, we take the first sorted properties list alphabetically + + // given + // we shuffle the constraints to ensure that the result doesn't depend from the ordering + val constraintsList = listOf( + Constraint(label = "User Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("address")), + Constraint(label = "User Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("country")), + Constraint(label = "User AAA", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("another_two")), + Constraint(label = "User Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("name", "surname")), + Constraint(label = "User Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("profession", "another_one")), + Constraint(label = "Product Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("code")), + Constraint(label = "Product Ext", type = StreamsConstraintType.UNIQUE, properties = linkedSetOf("name")) + ).shuffled() + + val relSchema = Schema(properties = mapOf("since" to "Long"), constraints = constraintsList) + val idsStart = mapOf("name" to "Sherlock", + "surname" to "Holmes", + "country" to "UK", + "profession" to "detective", + "another_one" to "foo", + "address" to "Baker Street", + "another_two" to "Dunno") + val idsEnd = mapOf("name" to "My Awesome Product", "code" to 17294) + + val cdcDataRelationship = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 2, + txEventsCount = 3, + operation = OperationType.updated + ), + payload = RelationshipPayload( + id = "2", + start = RelationshipNodeChange(id = "1", labels = listOf("User Ext", "User AAA", "NewLabel"), ids = idsStart), + end = RelationshipNodeChange(id = "2", labels = listOf("Product Ext", "NewLabelA"), ids = idsEnd), + after = RelationshipChange(properties = mapOf("since" to 2014)), + before = null, + label = "HAS BOUGHT" + ), + schema = relSchema + ) + val cdcQueryStrategy = SchemaIngestionStrategy() + val txEvents = listOf(StreamsSinkEntity(cdcDataRelationship, cdcDataRelationship)) + + // when + val relationshipEvents = cdcQueryStrategy.mergeRelationshipEvents(txEvents) + val relationshipDeleteEvents = cdcQueryStrategy.deleteRelationshipEvents(txEvents) + + // then + assertEquals(0, 
relationshipDeleteEvents.size) + assertEquals(1, relationshipEvents.size) + val relQuery = relationshipEvents[0].query + val expectedRelQueryOne = """ + |${KafkaUtil.UNWIND} + |MERGE (start:`User AAA`:`User Ext`{another_two: event.start.another_two}) + |MERGE (end:`Product Ext`{code: event.end.code}) + |MERGE (start)-[r:`HAS BOUGHT`]->(end) + |SET r = event.properties + """.trimMargin() + val expectedRelQueryTwo = """ + |${KafkaUtil.UNWIND} + |MERGE (start:`User Ext`:`User AAA`{another_two: event.start.another_two}) + |MERGE (end:`Product Ext`{code: event.end.code}) + |MERGE (start)-[r:`HAS BOUGHT`]->(end) + |SET r = event.properties + """.trimMargin() + assertTrue { listOf(expectedRelQueryOne, expectedRelQueryTwo).contains(relQuery.trimIndent()) } + val eventsRelList = relationshipEvents[0].events + assertEquals(1, eventsRelList.size) + val expectedRelEvents = listOf( + mapOf("start" to mapOf("another_two" to "Dunno"), + "end" to mapOf("code" to 17294), + "properties" to mapOf("since" to 2014)) + ) + assertEquals(expectedRelEvents, eventsRelList) + } + + @Test + fun `should create the Schema Query Strategy for node deletes`() { + // given + val nodeSchema = Schema(properties = mapOf("name" to "String", "surname" to "String", "comp@ny" to "String"), + constraints = listOf(Constraint(label = "User", type = StreamsConstraintType.UNIQUE, properties = setOf("name", "surname")))) + val cdcDataStart = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 0, + txEventsCount = 3, + operation = OperationType.deleted + ), + payload = NodePayload(id = "0", + before = NodeChange(properties = mapOf("name" to "Andrea", "surname" to "Santurbano", "comp@ny" to "LARUS-BA"), labels = listOf("User")), + after = null + ), + schema = nodeSchema + ) + val cdcDataEnd = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 1, + txEventsCount = 3, + operation = OperationType.deleted + ), + payload = NodePayload(id = "1", + before = NodeChange(properties = mapOf("name" to "Michael", "surname" to "Hunger", "comp@ny" to "Neo4j"), labels = listOf("User")), + after = null + ), + schema = nodeSchema + ) + val cdcQueryStrategy = SchemaIngestionStrategy() + val txEvents = listOf( + StreamsSinkEntity(cdcDataStart, cdcDataStart), + StreamsSinkEntity(cdcDataEnd, cdcDataEnd)) + + // when + val nodeEvents = cdcQueryStrategy.mergeNodeEvents(txEvents) + val nodeDeleteEvents = cdcQueryStrategy.deleteNodeEvents(txEvents) + + // then + assertEquals(1, nodeDeleteEvents.size) + assertEquals(0, nodeEvents.size) + val nodeQuery = nodeDeleteEvents[0].query + val expectedNodeQuery = """ + |${KafkaUtil.UNWIND} + |MATCH (n:User{surname: event.properties.surname, name: event.properties.name}) + |DETACH DELETE n + """.trimMargin() + assertEquals(expectedNodeQuery, nodeQuery.trimIndent()) + val eventsNodeList = nodeDeleteEvents[0].events + assertEquals(2, eventsNodeList.size) + val expectedNodeEvents = listOf( + mapOf("properties" to mapOf("name" to "Andrea", "surname" to "Santurbano", "comp@ny" to "LARUS-BA")), + mapOf("properties" to mapOf("name" to "Michael", "surname" to "Hunger", "comp@ny" to "Neo4j")) + ) + assertEquals(expectedNodeEvents, eventsNodeList) + } + + @Test + fun `should create the Schema Query Strategy for relationships deletes`() { + // given + val relSchema = Schema(properties = mapOf("since" to "Long"), + constraints = listOf(Constraint(label = "User", type = 
StreamsConstraintType.UNIQUE, properties = setOf("name", "surname")))) + val cdcDataRelationship = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 2, + txEventsCount = 3, + operation = OperationType.deleted + ), + payload = RelationshipPayload( + id = "2", + start = RelationshipNodeChange(id = "0", labels = listOf("User", "NewLabel"), ids = mapOf("name" to "Andrea", "surname" to "Santurbano")), + end = RelationshipNodeChange(id = "1", labels = listOf("User", "NewLabel"), ids = mapOf("name" to "Michael", "surname" to "Hunger")), + after = RelationshipChange(properties = mapOf("since" to 2014, "foo" to "label")), + before = RelationshipChange(properties = mapOf("since" to 2014)), + label = "KNOWS WHO" + ), + schema = relSchema + ) + val cdcQueryStrategy = SchemaIngestionStrategy() + val txEvents = listOf(StreamsSinkEntity(cdcDataRelationship, cdcDataRelationship)) + + // when + val relationshipEvents = cdcQueryStrategy.mergeRelationshipEvents(txEvents) + val relationshipDeleteEvents = cdcQueryStrategy.deleteRelationshipEvents(txEvents) + + // then + assertEquals(1, relationshipDeleteEvents.size) + assertEquals(0, relationshipEvents.size) + val relQuery = relationshipDeleteEvents[0].query + val expectedRelQuery = """ + |${KafkaUtil.UNWIND} + |MATCH (start:User{name: event.start.name, surname: event.start.surname}) + |MATCH (end:User{name: event.end.name, surname: event.end.surname}) + |MATCH (start)-[r:`KNOWS WHO`]->(end) + |DELETE r + """.trimMargin() + assertEquals(expectedRelQuery, relQuery.trimIndent()) + val eventsRelList = relationshipDeleteEvents[0].events + assertEquals(1, eventsRelList.size) + val expectedRelEvents = listOf( + mapOf("start" to mapOf("name" to "Andrea", "surname" to "Santurbano"), + "end" to mapOf("name" to "Michael", "surname" to "Hunger")) + ) + assertEquals(expectedRelEvents, eventsRelList) + } + +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/strategy/SourceIdIngestionStrategyTest.kt b/extended/src/test/kotlin/apoc/kafka/common/strategy/SourceIdIngestionStrategyTest.kt new file mode 100644 index 0000000000..773992dc1d --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/strategy/SourceIdIngestionStrategyTest.kt @@ -0,0 +1,331 @@ +package apoc.kafka.common.strategy + +import org.junit.Test +import apoc.kafka.events.* +import apoc.kafka.service.StreamsSinkEntity +import apoc.kafka.service.sink.strategy.SourceIdIngestionStrategy +import apoc.kafka.service.sink.strategy.SourceIdIngestionStrategyConfig +import apoc.kafka.utils.KafkaUtil +import kotlin.test.assertEquals + +class SourceIdIngestionStrategyTest { + + @Test + fun `should create the Merge Query Strategy for mixed events`() { + // given + val cdcDataStart = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 0, + txEventsCount = 3, + operation = OperationType.created + ), + payload = NodePayload(id = "0", + before = null, + after = NodeChange(properties = mapOf("name" to "Andrea", "comp@ny" to "LARUS-BA"), labels = listOf("User")) + ), + schema = Schema() + ) + val cdcDataEnd = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 1, + txEventsCount = 3, + operation = OperationType.created + ), + payload = NodePayload(id = "1", + before = null, + after = NodeChange(properties = mapOf("name" to "Michael", "comp@ny" to "Neo4j"), labels = 
listOf("User")) + ), + schema = Schema() + ) + val cdcDataRelationship = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 2, + txEventsCount = 3, + operation = OperationType.created + ), + payload = RelationshipPayload( + id = "2", + start = RelationshipNodeChange(id = "0", labels = listOf("User"), ids = emptyMap()), + end = RelationshipNodeChange(id = "1", labels = listOf("User"), ids = emptyMap()), + after = RelationshipChange(properties = mapOf("since" to 2014)), + before = null, + label = "KNOWS WHO" + ), + schema = Schema() + ) + val config = SourceIdIngestionStrategyConfig(labelName = "Custom SourceEvent", idName = "custom Id") + val cdcQueryStrategy = SourceIdIngestionStrategy(config) + val txEvents = listOf( + StreamsSinkEntity(cdcDataStart, cdcDataStart), + StreamsSinkEntity(cdcDataEnd, cdcDataEnd), + StreamsSinkEntity(cdcDataRelationship, cdcDataRelationship)) + + // when + val nodeEvents = cdcQueryStrategy.mergeNodeEvents(txEvents) + val nodeDeleteEvents = cdcQueryStrategy.deleteNodeEvents(txEvents) + + val relationshipEvents = cdcQueryStrategy.mergeRelationshipEvents(txEvents) + val relationshipDeleteEvents = cdcQueryStrategy.deleteRelationshipEvents(txEvents) + + // then + assertEquals(0, nodeDeleteEvents.size) + assertEquals(1, nodeEvents.size) + val nodeQuery = nodeEvents[0].query + val expectedNodeQuery = """ + |${KafkaUtil.UNWIND} + |MERGE (n:`Custom SourceEvent`{`custom Id`: event.id}) + |SET n = event.properties + |SET n.`custom Id` = event.id + |SET n:User + """.trimMargin() + assertEquals(expectedNodeQuery, nodeQuery.trimIndent()) + val eventsNodeList = nodeEvents[0].events + assertEquals(2, eventsNodeList.size) + val expectedNodeEvents = listOf( + mapOf("id" to "0", "properties" to mapOf("name" to "Andrea", "comp@ny" to "LARUS-BA")), + mapOf("id" to "1", "properties" to mapOf("name" to "Michael", "comp@ny" to "Neo4j")) + ) + assertEquals(expectedNodeEvents, eventsNodeList) + + assertEquals(0, relationshipDeleteEvents.size) + assertEquals(1, relationshipEvents.size) + val relQuery = relationshipEvents[0].query + val expectedRelQuery = """ + |${KafkaUtil.UNWIND} + |MERGE (start:`Custom SourceEvent`{`custom Id`: event.start}) + |MERGE (end:`Custom SourceEvent`{`custom Id`: event.end}) + |MERGE (start)-[r:`KNOWS WHO`{`custom Id`: event.id}]->(end) + |SET r = event.properties + |SET r.`custom Id` = event.id + """.trimMargin() + assertEquals(expectedRelQuery, relQuery.trimIndent()) + val eventsRelList = relationshipEvents[0].events + assertEquals(1, eventsRelList.size) + val expectedRelEvents = listOf( + mapOf("id" to "2", "start" to "0", "end" to "1", "properties" to mapOf("since" to 2014)) + ) + assertEquals(expectedRelEvents, eventsRelList) + } + + @Test + fun `should create the Merge Query Strategy for node updates`() { + // given + val nodeSchema = Schema() + // given + val cdcDataStart = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 0, + txEventsCount = 3, + operation = OperationType.updated + ), + payload = NodePayload(id = "0", + before = NodeChange(properties = mapOf("name" to "Andrea", "surname" to "Santurbano"), labels = listOf("User", "ToRemove")), + after = NodeChange(properties = mapOf("name" to "Andrea", "surname" to "Santurbano", "comp@ny" to "LARUS-BA"), labels = listOf("User", "NewLabel")) + ), + schema = nodeSchema + ) + val cdcDataEnd = StreamsTransactionEvent( + meta = Meta(timestamp = 
System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 1, + txEventsCount = 3, + operation = OperationType.updated + ), + payload = NodePayload(id = "1", + before = NodeChange(properties = mapOf("name" to "Michael", "surname" to "Hunger"), labels = listOf("User", "ToRemove")), + after = NodeChange(properties = mapOf("name" to "Michael", "surname" to "Hunger", "comp@ny" to "Neo4j"), labels = listOf("User", "NewLabel")) + ), + schema = nodeSchema + ) + val cdcQueryStrategy = SourceIdIngestionStrategy() + val txEvents = listOf( + StreamsSinkEntity(cdcDataStart, cdcDataStart), + StreamsSinkEntity(cdcDataEnd, cdcDataEnd)) + + // when + val nodeEvents = cdcQueryStrategy.mergeNodeEvents(txEvents) + val nodeDeleteEvents = cdcQueryStrategy.deleteNodeEvents(txEvents) + + // then + assertEquals(0, nodeDeleteEvents.size) + assertEquals(1, nodeEvents.size) + val nodeQuery = nodeEvents[0].query + val expectedNodeQuery = """ + |${KafkaUtil.UNWIND} + |MERGE (n:SourceEvent{sourceId: event.id}) + |SET n = event.properties + |SET n.sourceId = event.id + |REMOVE n:ToRemove + |SET n:NewLabel + """.trimMargin() + assertEquals(expectedNodeQuery, nodeQuery.trimIndent()) + val eventsNodeList = nodeEvents[0].events + assertEquals(2, eventsNodeList.size) + val expectedNodeEvents = listOf( + mapOf("id" to "0", "properties" to mapOf("name" to "Andrea", "surname" to "Santurbano", "comp@ny" to "LARUS-BA")), + mapOf("id" to "1", "properties" to mapOf("name" to "Michael", "surname" to "Hunger", "comp@ny" to "Neo4j")) + ) + assertEquals(expectedNodeEvents, eventsNodeList) + } + + @Test + fun `should create the Merge Query Strategy for relationships updates`() { + // given + val cdcDataRelationship = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 2, + txEventsCount = 3, + operation = OperationType.updated + ), + payload = RelationshipPayload( + id = "2", + start = RelationshipNodeChange(id = "0", labels = listOf("User"), ids = emptyMap()), + end = RelationshipNodeChange(id = "1", labels = listOf("User"), ids = emptyMap()), + after = RelationshipChange(properties = mapOf("since" to 2014, "foo" to "label")), + before = RelationshipChange(properties = mapOf("since" to 2014)), + label = "KNOWS WHO" + ), + schema = Schema() + ) + val cdcQueryStrategy = SourceIdIngestionStrategy() + val txEvents = listOf(StreamsSinkEntity(cdcDataRelationship, cdcDataRelationship)) + + // when + val relationshipEvents = cdcQueryStrategy.mergeRelationshipEvents(txEvents) + val relationshipDeleteEvents = cdcQueryStrategy.deleteRelationshipEvents(txEvents) + + // then + assertEquals(0, relationshipDeleteEvents.size) + assertEquals(1, relationshipEvents.size) + val relQuery = relationshipEvents[0].query + val expectedRelQuery = """ + |${KafkaUtil.UNWIND} + |MERGE (start:SourceEvent{sourceId: event.start}) + |MERGE (end:SourceEvent{sourceId: event.end}) + |MERGE (start)-[r:`KNOWS WHO`{sourceId: event.id}]->(end) + |SET r = event.properties + |SET r.sourceId = event.id + """.trimMargin() + assertEquals(expectedRelQuery, relQuery.trimIndent()) + val eventsRelList = relationshipEvents[0].events + assertEquals(1, eventsRelList.size) + val expectedRelEvents = listOf( + mapOf("id" to "2", "start" to "0", "end" to "1", "properties" to mapOf("since" to 2014, "foo" to "label")) + ) + assertEquals(expectedRelEvents, eventsRelList) + } + + @Test + fun `should create the Merge Query Strategy for node deletes`() { + // given + val nodeSchema = Schema() + // given + 
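+ // With the default SourceIdIngestionStrategyConfig the generated queries identify nodes by the
+ // `SourceEvent` label and the `sourceId` property, as the expected Cypher below asserts; the
+ // mixed-events test above shows how a custom pair can be supplied via
+ // SourceIdIngestionStrategyConfig(labelName = "Custom SourceEvent", idName = "custom Id").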
val cdcDataStart = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 0, + txEventsCount = 3, + operation = OperationType.deleted + ), + payload = NodePayload(id = "0", + before = NodeChange(properties = mapOf("name" to "Andrea", "surname" to "Santurbano"), labels = listOf("User")), + after = null + ), + schema = nodeSchema + ) + val cdcDataEnd = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 1, + txEventsCount = 3, + operation = OperationType.deleted + ), + payload = NodePayload(id = "1", + before = NodeChange(properties = mapOf("name" to "Michael", "surname" to "Hunger"), labels = listOf("User")), + after = null + ), + schema = nodeSchema + ) + val cdcQueryStrategy = SourceIdIngestionStrategy() + val txEvents = listOf( + StreamsSinkEntity(cdcDataStart, cdcDataStart), + StreamsSinkEntity(cdcDataEnd, cdcDataEnd)) + + // when + val nodeEvents = cdcQueryStrategy.mergeNodeEvents(txEvents) + val nodeDeleteEvents = cdcQueryStrategy.deleteNodeEvents(txEvents) + + // then + assertEquals(1, nodeDeleteEvents.size) + assertEquals(0, nodeEvents.size) + val nodeQuery = nodeDeleteEvents[0].query + val expectedNodeQuery = """ + |${KafkaUtil.UNWIND} MATCH (n:SourceEvent{sourceId: event.id}) DETACH DELETE n + """.trimMargin() + assertEquals(expectedNodeQuery, nodeQuery.trimIndent()) + val eventsNodeList = nodeDeleteEvents[0].events + assertEquals(2, eventsNodeList.size) + val expectedNodeEvents = listOf( + mapOf("id" to "0"), + mapOf("id" to "1") + ) + assertEquals(expectedNodeEvents, eventsNodeList) + } + + @Test + fun `should create the Merge Query Strategy for relationships deletes`() { + // given + val cdcDataRelationship = StreamsTransactionEvent( + meta = Meta(timestamp = System.currentTimeMillis(), + username = "user", + txId = 1, + txEventId = 2, + txEventsCount = 3, + operation = OperationType.deleted + ), + payload = RelationshipPayload( + id = "2", + start = RelationshipNodeChange(id = "0", labels = listOf("User"), ids = emptyMap()), + end = RelationshipNodeChange(id = "1", labels = listOf("User"), ids = emptyMap()), + after = RelationshipChange(properties = mapOf("since" to 2014, "foo" to "label")), + before = RelationshipChange(properties = mapOf("since" to 2014)), + label = "KNOWS WHO" + ), + schema = Schema() + ) + val cdcQueryStrategy = SourceIdIngestionStrategy() + val txEvents = listOf(StreamsSinkEntity(cdcDataRelationship, cdcDataRelationship)) + + // when + val relationshipEvents = cdcQueryStrategy.mergeRelationshipEvents(txEvents) + val relationshipDeleteEvents = cdcQueryStrategy.deleteRelationshipEvents(txEvents) + + // then + assertEquals(1, relationshipDeleteEvents.size) + assertEquals(0, relationshipEvents.size) + val relQuery = relationshipDeleteEvents[0].query + val expectedRelQuery = """ + |${KafkaUtil.UNWIND} MATCH ()-[r:`KNOWS WHO`{sourceId: event.id}]-() DELETE r + """.trimMargin() + assertEquals(expectedRelQuery, relQuery.trimIndent()) + val eventsRelList = relationshipDeleteEvents[0].events + assertEquals(1, eventsRelList.size) + val expectedRelEvents = listOf(mapOf("id" to "2")) + assertEquals(expectedRelEvents, eventsRelList) + } + +} + diff --git a/extended/src/test/kotlin/apoc/kafka/common/support/Assert.kt b/extended/src/test/kotlin/apoc/kafka/common/support/Assert.kt new file mode 100644 index 0000000000..e6f83634d5 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/support/Assert.kt @@ -0,0 +1,37 @@ 
+package apoc.kafka.common.support + +import org.hamcrest.Matcher +import org.hamcrest.StringDescription +import org.neo4j.function.ThrowingSupplier +import java.util.concurrent.TimeUnit + +object Assert { + fun assertEventually(actual: ThrowingSupplier, matcher: Matcher, timeout: Long, timeUnit: TimeUnit) { + assertEventually({ _: T -> "" }, actual, matcher, timeout, timeUnit) + } + + fun assertEventually(reason: String, actual: ThrowingSupplier, matcher: Matcher, timeout: Long, timeUnit: TimeUnit) { + assertEventually({ _: T -> reason }, actual, matcher, timeout, timeUnit) + } + + fun assertEventually(reason: java.util.function.Function, actual: ThrowingSupplier, matcher: Matcher, timeout: Long, timeUnit: TimeUnit) { + val endTimeMillis = System.currentTimeMillis() + timeUnit.toMillis(timeout) + while (true) { + val sampleTime = System.currentTimeMillis() + val last: T = actual.get() + val matched: Boolean = matcher.matches(last) + if (matched || sampleTime > endTimeMillis) { + if (!matched) { + val description = StringDescription() + description.appendText(reason.apply(last)).appendText("\nExpected: ").appendDescriptionOf(matcher).appendText("\n but: ") + matcher.describeMismatch(last, description) + throw AssertionError("Timeout hit (" + timeout + " " + timeUnit.toString().toLowerCase() + ") while waiting for condition to match: " + description.toString()) + } else { + return + } + } + Thread.sleep(100L) + } + } + +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/support/KafkaTestUtils.kt b/extended/src/test/kotlin/apoc/kafka/common/support/KafkaTestUtils.kt new file mode 100644 index 0000000000..9f3e4c0eb2 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/support/KafkaTestUtils.kt @@ -0,0 +1,62 @@ +package apoc.kafka.common.support + +import apoc.kafka.PublishProcedures +import apoc.kafka.consumer.procedures.StreamsSinkProcedures +import apoc.util.TestUtil +import org.apache.kafka.clients.consumer.ConsumerConfig +import org.apache.kafka.clients.consumer.KafkaConsumer +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.clients.producer.ProducerConfig +import org.apache.kafka.common.serialization.ByteArrayDeserializer +import org.apache.kafka.common.serialization.ByteArraySerializer +import org.apache.kafka.common.serialization.StringDeserializer +import org.apache.kafka.common.serialization.StringSerializer +import org.neo4j.configuration.GraphDatabaseSettings +import org.neo4j.dbms.api.DatabaseManagementService +import org.neo4j.graphdb.GraphDatabaseService +import org.neo4j.kernel.api.procedure.GlobalProcedures +import java.util.* + +object KafkaTestUtils { + fun createConsumer(bootstrapServers: String, + schemaRegistryUrl: String? 
= null, + keyDeserializer: String = StringDeserializer::class.java.name, + valueDeserializer: String = ByteArrayDeserializer::class.java.name, + vararg topics: String = emptyArray()): KafkaConsumer { + val props = Properties() + props[ProducerConfig.BOOTSTRAP_SERVERS_CONFIG] = bootstrapServers + props["group.id"] = "neo4j" // UUID.randomUUID().toString() + props["enable.auto.commit"] = "true" + props[ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG] = keyDeserializer + props[ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG] = valueDeserializer + props["auto.offset.reset"] = "earliest" + if (schemaRegistryUrl != null) { + props["schema.registry.url"] = schemaRegistryUrl + } + val consumer = KafkaConsumer(props) + if (!topics.isNullOrEmpty()) { + consumer.subscribe(topics.toList()) + } + return consumer + } + + fun createProducer(bootstrapServers: String, + schemaRegistryUrl: String? = null, + keySerializer: String = StringSerializer::class.java.name, + valueSerializer: String = ByteArraySerializer::class.java.name): KafkaProducer { + val props = Properties() + props[ProducerConfig.BOOTSTRAP_SERVERS_CONFIG] = bootstrapServers + props[ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG] = keySerializer + props[ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG] = valueSerializer + if (!schemaRegistryUrl.isNullOrBlank()) { + props["schema.registry.url"] = schemaRegistryUrl + } + return KafkaProducer(props) + } + + fun getDbServices(dbms: DatabaseManagementService): GraphDatabaseService { + val db = dbms.database(GraphDatabaseSettings.DEFAULT_DATABASE_NAME) + TestUtil.registerProcedure(db, StreamsSinkProcedures::class.java, GlobalProcedures::class.java, PublishProcedures::class.java); + return db + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/support/Neo4jContainerExtension.kt b/extended/src/test/kotlin/apoc/kafka/common/support/Neo4jContainerExtension.kt new file mode 100644 index 0000000000..45cf8aa73c --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/support/Neo4jContainerExtension.kt @@ -0,0 +1,178 @@ +package apoc.kafka.common.support + +import apoc.kafka.common.utils.Neo4jUtilsTest +import apoc.kafka.utils.KafkaUtil +import org.neo4j.driver.AuthToken +import org.neo4j.driver.AuthTokens +import org.neo4j.driver.Driver +import org.neo4j.driver.GraphDatabase +import org.neo4j.driver.Session +import org.neo4j.driver.SessionConfig +import org.rnorth.ducttape.unreliables.Unreliables +import org.slf4j.LoggerFactory +import org.testcontainers.containers.KafkaContainer +import org.testcontainers.containers.Neo4jContainer +import org.testcontainers.containers.Network +import org.testcontainers.containers.output.Slf4jLogConsumer +import org.testcontainers.containers.wait.strategy.AbstractWaitStrategy +import org.testcontainers.containers.wait.strategy.WaitAllStrategy +import org.testcontainers.containers.wait.strategy.WaitStrategy +import java.io.File +import java.time.Duration +import java.util.concurrent.TimeUnit + +private class DatabasesWaitStrategy(private val auth: AuthToken): AbstractWaitStrategy() { + private var databases = arrayOf() + + fun forDatabases(vararg databases: String): DatabasesWaitStrategy { + this.databases += databases + return this + } + + override fun waitUntilReady() { + val boltUrl = "bolt://${waitStrategyTarget.containerIpAddress}:${waitStrategyTarget.getMappedPort(7687)}" + val driver = GraphDatabase.driver(boltUrl, auth) + val systemSession = driver.session(SessionConfig.forDatabase(KafkaUtil.SYSTEM_DATABASE_NAME)) + 
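+ // Create the requested databases up front (idempotent thanks to IF NOT EXISTS), then poll
+ // SHOW DATABASES until every requested database reports an "online" currentStatus or the
+ // startup timeout elapses.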
systemSession.beginTransaction().use { tx -> + databases.forEach { tx.run("CREATE DATABASE $it IF NOT EXISTS") } + tx.commit() + } + Unreliables.retryUntilSuccess(startupTimeout.seconds.toInt(), TimeUnit.SECONDS) { + rateLimiter.doWhenReady { + if (databases.isNotEmpty()) { + val databasesStatus = systemSession.beginTransaction() + .use { tx -> tx.run("SHOW DATABASES").list().map { it.get("name").asString() to it.get("currentStatus").asString() }.toMap() } + val notOnline = databasesStatus.filterValues { it != "online" } + if (databasesStatus.size < databases.size || notOnline.isNotEmpty()) { + throw RuntimeException("Cannot started because of the following databases: ${notOnline.keys}") + } + } + } + true + } + systemSession.close() + driver.close() + } + +} + +class Neo4jContainerExtension(dockerImage: String): Neo4jContainer(dockerImage) { + constructor(): this("neo4j:5.20.0-enterprise") + private val logger = LoggerFactory.getLogger(Neo4jContainerExtension::class.java) + var driver: Driver? = null + var session: Session? = null + + private var cypher: String? = null + + private var withDriver = true + private var withLogger = false + private var withStreamsPlugin = true + private var forcePluginRebuild = true + + private var databases = arrayOf() + + private val waitStrategies = mutableListOf() + + fun withWaitStrategy(waitStrategy: WaitStrategy): Neo4jContainerExtension { + this.waitStrategies += waitStrategy + return this + } + + + fun withFixture(cypher: String): Neo4jContainerExtension { + this.cypher = cypher + return this + } + + fun withoutDriver(): Neo4jContainerExtension { + this.withDriver = false + return this + } + + fun withoutStreamsPlugin(): Neo4jContainerExtension { + this.withStreamsPlugin = false + return this + } + + fun withoutForcePluginRebuild(): Neo4jContainerExtension { + this.forcePluginRebuild = false + return this + } + + fun withKafka(kafka: KafkaContainer): Neo4jContainerExtension? { + return kafka.network?.let { + kafka.networkAliases?.map { "$it:9092" }?.let { + it1 -> withKafka(it, it1.joinToString(",")) + } + } + } + + fun withKafka(network: Network, bootstrapServers: String): Neo4jContainerExtension { + withNetwork(network) + withNeo4jConfig(Neo4jUtilsTest.KAFKA_BOOTSTRAP_SERVER, bootstrapServers) + return this + } + + fun withDatabases(vararg databases: String): Neo4jContainerExtension { + this.databases += databases + return this + } + + private fun createAuth(): AuthToken { + return if (!adminPassword.isNullOrBlank()) AuthTokens.basic("neo4j", adminPassword) else AuthTokens.none(); + } + + override fun start() { + if (databases.isNotEmpty()) { + withWaitStrategy( + DatabasesWaitStrategy(createAuth()) + .forDatabases(*databases) + .withStartupTimeout(Duration.ofMinutes(2))) + } + if (waitStrategies.isNotEmpty()) { + val waitAllStrategy = waitStrategy as WaitAllStrategy + waitStrategies.reversed() + .forEach { waitStrategy -> waitAllStrategy.withStrategy(waitStrategy) } + } + if (withLogger) { + withLogConsumer(Slf4jLogConsumer(logger)) + } + addEnv("NEO4J_ACCEPT_LICENSE_AGREEMENT", "yes") + + super.start() + if (withDriver) { + createDriver() + } + } + + private fun createDriver() { + driver = GraphDatabase.driver(boltUrl, createAuth()) + session = driver!!.session() + cypher?.split(";") + ?.forEach { query -> session!!.beginTransaction().use { it.run(query) } } + } + + private fun findDistrFile(): File? 
{ + try { + return File("../target/containerPlugins").listFiles() + .filter { it.extension == "jar" } + .firstOrNull() + } catch (e: Exception) { + return null + } + } + + override fun stop() { + session?.close() + driver?.close() + super.stop() + if (withStreamsPlugin && forcePluginRebuild) { + findDistrFile()!!.delete() + } + } + + fun withLogging(): Neo4jContainerExtension { + this.withLogger = true + return this + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/utils/CoroutineUtilsTest.kt b/extended/src/test/kotlin/apoc/kafka/common/utils/CoroutineUtilsTest.kt new file mode 100644 index 0000000000..15f569034b --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/utils/CoroutineUtilsTest.kt @@ -0,0 +1,63 @@ +package apoc.kafka.common.utils + +import apoc.kafka.utils.KafkaUtil.retryForException +import kotlinx.coroutines.runBlocking +import org.junit.Test +import java.io.IOException +import kotlin.test.assertEquals +import kotlin.test.assertTrue + +class CoroutineUtilsTest { + + @Test + fun `should succeed after retry for known exception`() = runBlocking { + var count = 0 + var executed = false + retryForException(exceptions = arrayOf(RuntimeException::class.java), + retries = 4, delayTime = 100) { + if (count < 2) { + ++count + throw RuntimeException() + } + executed = true + } + + assertEquals(2, count) + assertTrue { executed } + } + + @Test(expected = RuntimeException::class) + fun `should fail after retry for known exception`() { + var retries = 3 + runBlocking { + retryForException(exceptions = arrayOf(RuntimeException::class.java), + retries = 3, delayTime = 100) { + if (retries >= 0) { + --retries + throw RuntimeException() + } + } + } + } + + @Test + fun `should fail fast for unknown exception`() { + var iteration = 0 + var isIOException = false + try { + runBlocking { + retryForException(exceptions = arrayOf(RuntimeException::class.java), + retries = 3, delayTime = 100) { + if (iteration >= 0) { + ++iteration + throw IOException() + } + } + } + } catch (e: Exception) { + isIOException = e is IOException + } + assertTrue { isIOException } + assertEquals(1, iteration) + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/utils/Neo4jUtilsTest.kt b/extended/src/test/kotlin/apoc/kafka/common/utils/Neo4jUtilsTest.kt new file mode 100644 index 0000000000..00a13acdb0 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/utils/Neo4jUtilsTest.kt @@ -0,0 +1,23 @@ +package apoc.kafka.common.utils + +import apoc.kafka.utils.KafkaUtil +import org.junit.ClassRule +import org.junit.Test +import org.neo4j.test.rule.ImpermanentDbmsRule +import kotlin.test.assertTrue + +class Neo4jUtilsTest { + + companion object { + @ClassRule @JvmField + val db = ImpermanentDbmsRule() + + val KAFKA_BOOTSTRAP_SERVER = "apoc.kafka.bootstrap.servers" + } + + @Test + fun shouldCheckIfIsWriteableInstance() { + val isWriteableInstance = KafkaUtil.isWriteableInstance(db) + assertTrue { isWriteableInstance } + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/utils/ProcedureUtilsTest.kt b/extended/src/test/kotlin/apoc/kafka/common/utils/ProcedureUtilsTest.kt new file mode 100644 index 0000000000..93e81ba1a3 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/utils/ProcedureUtilsTest.kt @@ -0,0 +1,22 @@ +package apoc.kafka.common.utils + +import apoc.kafka.utils.KafkaUtil +import org.junit.ClassRule +import org.junit.Test +import org.neo4j.test.rule.ImpermanentDbmsRule +import
kotlin.test.assertFalse + +class ProcedureUtilsTest { + + companion object { + @ClassRule @JvmField + val db = ImpermanentDbmsRule() + } + + @Test + fun shouldCheckIfIsACluster() { + val isCluster = KafkaUtil.isCluster(db) + assertFalse { isCluster } + } + +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/utils/SchemaUtilsTest.kt b/extended/src/test/kotlin/apoc/kafka/common/utils/SchemaUtilsTest.kt new file mode 100644 index 0000000000..fcb7be8dac --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/utils/SchemaUtilsTest.kt @@ -0,0 +1,131 @@ +package apoc.kafka.common.utils + +import org.junit.Test +import apoc.kafka.events.Constraint +import apoc.kafka.events.RelKeyStrategy +import apoc.kafka.events.StreamsConstraintType +import apoc.kafka.utils.KafkaUtil.getNodeKeys +import kotlin.test.assertEquals + +class SchemaUtilsTest { + + @Test + fun `getNodeKeys should select the constraint with lowest properties`() { + val props = mapOf("LabelA" to setOf("foo", "bar"), + "LabelB" to setOf("foo", "bar", "fooBar"), + "LabelC" to setOf("foo")) + val constraints = props.map { + Constraint(label = it.key, properties = it.value, type = StreamsConstraintType.UNIQUE) + } + val keys = getNodeKeys(props.keys.toList(), setOf("prop", "foo", "bar"), constraints) + assertEquals(setOf("foo"), keys) + } + + @Test + fun `getNodeKeys should return the key sorted properly`() { + // the method getNodeKeys should select (with multiple labels) the constraint with lowest properties + // with the same size, we take the first label in alphabetical order + // finally, with same label name, we take the first sorted properties list alphabetically + + val pair1 = "LabelX" to setOf("foo", "aaa") + val pair2 = "LabelB" to setOf("bar", "foo") + val pair3 = "LabelC" to setOf("baz", "bar") + val pair4 = "LabelB" to setOf("bar", "bez") + val pair5 = "LabelA" to setOf("bar", "baa", "xcv") + val pair6 = "LabelC" to setOf("aaa", "baa", "xcz") + val pair7 = "LabelA" to setOf("foo", "aac") + val pair8 = "LabelA" to setOf("foo", "aab") + val props = listOf(pair1, pair2, pair3, pair4, pair5, pair6, pair7, pair8) + + // we shuffle the constraints to ensure that the result doesn't depend from the ordering + val constraints = props.map { + Constraint(label = it.first, properties = it.second, type = StreamsConstraintType.UNIQUE) + }.shuffled() + + val propertyKeys = setOf("prop", "prop2", "foo", "bar", "baz", "bez", "aaa", "aab", "baa", "aac", "xcz", "xcv") + val actualKeys = getNodeKeys(props.map { it.first }, propertyKeys, constraints) + val expectedKeys = setOf("aab", "foo") + + assertEquals(expectedKeys, actualKeys) + } + + @Test + fun `getNodeKeys should return all keys when RelKeyStrategy is ALL`() { + + val pair1 = "LabelX" to setOf("foo", "aaa") + val pair2 = "LabelB" to setOf("bar", "foo") + val pair3 = "LabelC" to setOf("baz", "bar") + val pair4 = "LabelB" to setOf("bar", "bez") + val pair5 = "LabelA" to setOf("bar", "baa", "xcv") + val pair6 = "LabelC" to setOf("aaa", "baa", "xcz") + val pair7 = "LabelA" to setOf("foo", "aac") + val pair8 = "LabelA" to setOf("foo", "aab") + val props = listOf(pair1, pair2, pair3, pair4, pair5, pair6, pair7, pair8) + + // we shuffle the constraints to ensure that the result doesn't depend from the ordering + val constraints = props.map { + Constraint(label = it.first, properties = it.second, type = StreamsConstraintType.UNIQUE) + }.shuffled() + + val propertyKeys = setOf("prop", "prop2", "foo", "bar", "baz", "bez", "aaa", "aab", "baa", "aac", "xcz", 
"xcv") + val actualKeys = getNodeKeys(props.map { it.first }, propertyKeys, constraints, RelKeyStrategy.ALL) + val expectedKeys = setOf("aaa", "aab", "aac", "baa", "bar", "baz", "bez", "foo", "xcv", "xcz") + + assertEquals(expectedKeys, actualKeys) + } + + @Test + fun `getNodeKeys should return the key sorted properly (with one label)`() { + // the method getNodeKeys should select the constraint with lowest properties + // with the same size, we take the first sorted properties list alphabetically + + val pair1 = "LabelA" to setOf("foo", "bar") + val pair2 = "LabelA" to setOf("bar", "foo") + val pair3 = "LabelA" to setOf("baz", "bar") + val pair4 = "LabelA" to setOf("bar", "bez") + val props = listOf(pair1, pair2, pair3, pair4) + + // we shuffle the constraints to ensure that the result doesn't depend from the ordering + val constraints = props.map { + Constraint(label = it.first, properties = it.second, type = StreamsConstraintType.UNIQUE) + }.shuffled() + + val propertyKeys = setOf("prop", "foo", "bar", "baz", "bez") + val actualKeys = getNodeKeys(listOf("LabelA"), propertyKeys, constraints) + val expectedKeys = setOf("bar", "baz") + + assertEquals(expectedKeys, actualKeys) + } + @Test + fun `getNodeKeys should return all keys when RelKeyStrategy is ALL (with one label)`() { + + val pair1 = "LabelA" to setOf("foo", "bar") + val pair2 = "LabelA" to setOf("bar", "foo") + val pair3 = "LabelA" to setOf("baz", "bar") + val pair4 = "LabelA" to setOf("bar", "bez") + val props = listOf(pair1, pair2, pair3, pair4) + + // we shuffle the constraints to ensure that the result doesn't depend from the ordering + val constraints = props.map { + Constraint(label = it.first, properties = it.second, type = StreamsConstraintType.UNIQUE) + }.shuffled() + + val propertyKeys = setOf("prop", "foo", "bar", "baz", "bez") + val actualKeys = getNodeKeys(listOf("LabelA"), propertyKeys, constraints, RelKeyStrategy.ALL) + val expectedKeys = setOf("bar", "baz", "bez", "foo") + + assertEquals(expectedKeys, actualKeys) + } + + @Test + fun `getNodeKeys should return empty in case it didn't match anything`() { + val props = mapOf("LabelA" to setOf("foo", "bar"), + "LabelB" to setOf("foo", "bar", "fooBar"), + "LabelC" to setOf("foo")) + val constraints = props.map { + Constraint(label = it.key, properties = it.value, type = StreamsConstraintType.UNIQUE) + } + val keys = getNodeKeys(props.keys.toList(), setOf("prop", "key"), constraints) + assertEquals(emptySet(), keys) + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/common/utils/StreamsUtilsTest.kt b/extended/src/test/kotlin/apoc/kafka/common/utils/StreamsUtilsTest.kt new file mode 100644 index 0000000000..0b6005e8af --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/common/utils/StreamsUtilsTest.kt @@ -0,0 +1,35 @@ +package apoc.kafka.common.utils + +import apoc.kafka.utils.KafkaUtil +import org.junit.Test +import java.io.IOException +import kotlin.test.assertNull +import kotlin.test.assertTrue + +class StreamsUtilsTest { + + private val foo = "foo" + + @Test + fun shouldReturnValue() { + val data = KafkaUtil.ignoreExceptions({ + foo + }, RuntimeException::class.java) + assertTrue { data != null && data == foo } + } + + @Test + fun shouldIgnoreTheException() { + val data = KafkaUtil.ignoreExceptions({ + throw RuntimeException() + }, RuntimeException::class.java) + assertNull(data) + } + + @Test(expected = IOException::class) + fun shouldNotIgnoreTheException() { + KafkaUtil.ignoreExceptions({ + throw IOException() + }, 
RuntimeException::class.java) + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/consumer/kafka/KafkaConsumeProceduresTSE.kt b/extended/src/test/kotlin/apoc/kafka/consumer/kafka/KafkaConsumeProceduresTSE.kt new file mode 100644 index 0000000000..32500fca16 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/consumer/kafka/KafkaConsumeProceduresTSE.kt @@ -0,0 +1,189 @@ +package apoc.kafka.consumer.kafka + +import apoc.kafka.common.support.KafkaTestUtils +import apoc.util.JsonUtil +import kotlinx.coroutines.* +import org.apache.kafka.clients.consumer.ConsumerConfig +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.common.TopicPartition +import org.junit.Test +import org.neo4j.graphdb.GraphDatabaseService +import java.util.* +import kotlin.test.* + +@Suppress("UNCHECKED_CAST", "DEPRECATION") +class KafkaConsumeProceduresTSE : KafkaEventSinkBaseTSE() { + + private fun testProcedure(db: GraphDatabaseService, topic: String) { + + val producerRecord = ProducerRecord(topic, "{\"id\": \"{${UUID.randomUUID()}}\"}", JsonUtil.writeValueAsBytes(data)) + kafkaProducer.send(producerRecord).get() + db.executeTransactionally("CALL apoc.kafka.consume('$topic', {timeout: 5000}) YIELD event RETURN event", emptyMap()) { result -> + assertTrue { result.hasNext() } + val resultMap = result.next() + assertTrue { resultMap.containsKey("event") } + assertNotNull(resultMap["event"], "should contain event") + val event = resultMap["event"] as Map + val resultData = event["data"] as Map + assertEquals(data, resultData) + } + } + + @Test + fun shouldConsumeDataFromProcedureWithSinkDisabled() { + val db = createDbWithKafkaConfigs( + "apoc.kafka.sink.enabled" to "false", + "apoc.kafka.${ConsumerConfig.GROUP_ID_CONFIG}" to "1" + ) + + val topic = "bar" + testProcedure(db, topic) + } + + @Test + fun shouldConsumeDataFromProcedure() { + val db = createDbWithKafkaConfigs("apoc.kafka.${ConsumerConfig.GROUP_ID_CONFIG}" to "2") + val topic = "foo" + testProcedure(db, topic) + } + + @Test + fun shouldTimeout() { + val db = createDbWithKafkaConfigs() + db.executeTransactionally("CALL apoc.kafka.consume('foo1', {timeout: 2000}) YIELD event RETURN event", emptyMap()) { + assertFalse { it.hasNext() } + } + } + + @Test + fun shouldReadSimpleDataType() { + val db = createDbWithKafkaConfigs("apoc.kafka.${ConsumerConfig.GROUP_ID_CONFIG}" to "3") + + val topic = "simple-data" + val simpleInt = 1 + val simpleBoolean = true + val simpleString = "test" + var producerRecord = ProducerRecord(topic, "{\"a\":1}", JsonUtil.writeValueAsBytes(simpleInt)) + kafkaProducer.send(producerRecord).get() + producerRecord = ProducerRecord(topic, "{\"a\":2}", JsonUtil.writeValueAsBytes(simpleBoolean)) + kafkaProducer.send(producerRecord).get() + producerRecord = ProducerRecord(topic, "{\"a\":3}", JsonUtil.writeValueAsBytes(simpleString)) + kafkaProducer.send(producerRecord).get() + db.executeTransactionally(""" + CALL apoc.kafka.consume('$topic', {timeout: 5000}) YIELD event + MERGE (t:LOG{simpleData: event.data}) + RETURN count(t) AS insert + """.trimIndent()) + db.executeTransactionally(""" + MATCH (l:LOG) + WHERE l.simpleData IN [$simpleInt, $simpleBoolean, "$simpleString"] + RETURN count(l) as count + """.trimIndent(), emptyMap() + ) { searchResult -> + assertTrue { searchResult.hasNext() } + val searchResultMap = searchResult.next() + assertTrue { searchResultMap.containsKey("count") } + assertEquals(3L, searchResultMap["count"]) + } + } + + @Test + fun 
shouldReadATopicPartitionStartingFromAnOffset() = runBlocking { + val db = createDbWithKafkaConfigs() + + val topic = "read-from-range" + val partition = 0 + var start = -1L + (1..10).forEach { + val producerRecord = ProducerRecord(topic, partition, "{\"a\":1}", JsonUtil.writeValueAsBytes("{\"b\":${it}}")) + val recordMetadata = kafkaProducer.send(producerRecord).get() + if (it == 6) { + start = recordMetadata.offset() + } + } + delay(3000) + db.executeTransactionally(""" + CALL apoc.kafka.consume('$topic', {timeout: 5000, partitions: [{partition: $partition, offset: $start}]}) YIELD event + CREATE (t:LOG{simpleData: event.data}) + RETURN count(t) AS insert + """.trimIndent()) + + val count = db.executeTransactionally(""" + MATCH (l:LOG) + RETURN count(l) as count + """.trimIndent(), emptyMap() + ) { + it.columnAs("count").next() + } + assertEquals(5L, count) + } + + @Test + fun shouldReadFromLatest() = runBlocking { + val db = createDbWithKafkaConfigs() + + val topic = "simple-data-from-latest" + val simpleString = "test" + val partition = 0 + (1..10).forEach { + val producerRecord = ProducerRecord(topic, partition, "{\"a\":${it}}", JsonUtil.writeValueAsBytes("{\"b\":${it}}")) + kafkaProducer.send(producerRecord).get() + } + delay(1000) // should ignore the three above + GlobalScope.launch(Dispatchers.IO) { + delay(1000) + val producerRecord = ProducerRecord(topic, partition, "{\"a\":1}", JsonUtil.writeValueAsBytes(simpleString)) + kafkaProducer.send(producerRecord).get() + } + db.executeTransactionally(""" + CALL apoc.kafka.consume('$topic', {timeout: 5000, from: 'latest', groupId: 'foo'}) YIELD event + CREATE (t:LOG{simpleData: event.data}) + RETURN count(t) AS insert + """.trimIndent()) + db.executeTransactionally(""" + MATCH (l:LOG) + RETURN count(l) AS count + """.trimIndent(), emptyMap() + ) { searchResult -> + assertTrue { searchResult.hasNext() } + val searchResultMap = searchResult.next() + assertTrue { searchResultMap.containsKey("count") } + assertEquals(1L, searchResultMap["count"]) + } + Unit + } + + @Test + fun shouldNotCommit() { + val db = createDbWithKafkaConfigs( + "enable.auto.commit" to false, + "apoc.kafka.${ConsumerConfig.GROUP_ID_CONFIG}" to "ajeje" + ) + + val topic = "simple-data" + val simpleInt = 1 + val partition = 0 + var producerRecord = ProducerRecord(topic, partition, "{\"a\":1}", JsonUtil.writeValueAsBytes("{\"b\":${simpleInt}}")) + kafkaProducer.send(producerRecord).get() + db.executeTransactionally(""" + CALL apoc.kafka.consume('$topic', {timeout: 5000, autoCommit: false, commit:false}) YIELD event + MERGE (t:LOG{simpleData: event.data}) + RETURN count(t) AS insert + """.trimIndent()) + db.executeTransactionally(""" + MATCH (l:LOG) + RETURN count(l) as count + """.trimIndent(), emptyMap() + ) { searchResult -> + assertTrue { searchResult.hasNext() } + val searchResultMap = searchResult.next() + assertTrue { searchResultMap.containsKey("count") } + assertEquals(1L, searchResultMap["count"]) + } + val kafkaConsumer = KafkaTestUtils.createConsumer( + bootstrapServers = KafkaEventSinkSuiteIT.kafka.bootstrapServers, + schemaRegistryUrl = KafkaEventSinkSuiteIT.schemaRegistry.getSchemaRegistryUrl()) + val offsetAndMetadata = kafkaConsumer.committed(TopicPartition(topic, partition)) + assertNull(offsetAndMetadata) + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/consumer/kafka/KafkaEventSinkBaseTSE.kt b/extended/src/test/kotlin/apoc/kafka/consumer/kafka/KafkaEventSinkBaseTSE.kt new file mode 100644 index 0000000000..8d4734cce9 
--- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/consumer/kafka/KafkaEventSinkBaseTSE.kt @@ -0,0 +1,117 @@ +package apoc.kafka.consumer.kafka + +import apoc.kafka.PublishProcedures +import apoc.kafka.consumer.procedures.StreamsSinkProcedures +import org.apache.avro.generic.GenericRecord +import org.apache.kafka.clients.producer.KafkaProducer +import org.junit.jupiter.api.AfterAll +import org.junit.jupiter.api.AfterEach +import org.junit.jupiter.api.BeforeAll +import org.junit.jupiter.api.BeforeEach +import apoc.kafka.common.support.KafkaTestUtils +import apoc.util.DbmsTestUtil +import apoc.util.TestUtil +import org.junit.* +import org.junit.rules.TemporaryFolder +import org.neo4j.configuration.GraphDatabaseSettings +import org.neo4j.dbms.api.DatabaseManagementService +import org.neo4j.graphdb.GraphDatabaseService +import org.neo4j.kernel.api.procedure.GlobalProcedures + +import apoc.ExtendedApocConfig.APOC_KAFKA_ENABLED +import apoc.kafka.common.utils.Neo4jUtilsTest +import org.apache.kafka.common.serialization.ByteArraySerializer + +open class KafkaEventSinkBaseTSE { + + companion object { + private var startedFromSuite = true + + lateinit var dbms: DatabaseManagementService + + @BeforeClass + @BeforeAll + @JvmStatic + fun setUpContainer() { + if (!KafkaEventSinkSuiteIT.isRunning) { + startedFromSuite = false + KafkaEventSinkSuiteIT.setUpContainer() + } + } + + @AfterClass + @AfterAll + @JvmStatic + fun tearDownContainer() { + if (!startedFromSuite) { + KafkaEventSinkSuiteIT.tearDownContainer() + } + } + } + + @JvmField + @Rule + var temporaryFolder = TemporaryFolder() + + lateinit var kafkaProducer: KafkaProducer + lateinit var kafkaCustomProducer: KafkaProducer + + + // Test data + val dataProperties = mapOf("prop1" to "foo", "bar" to 1) + val data = mapOf("id" to 1, "properties" to dataProperties) + + @Before + @BeforeEach + fun setUp() { + kafkaProducer = KafkaTestUtils.createProducer( + bootstrapServers = KafkaEventSinkSuiteIT.kafka.bootstrapServers + ) + kafkaCustomProducer = KafkaTestUtils.createProducer( + bootstrapServers = KafkaEventSinkSuiteIT.kafka.bootstrapServers, + schemaRegistryUrl = KafkaEventSinkSuiteIT.schemaRegistry.getSchemaRegistryUrl(), + keySerializer = ByteArraySerializer::class.java.name, + valueSerializer = ByteArraySerializer::class.java.name) + } + + fun createDbWithKafkaConfigs(vararg pairs: Pair) : GraphDatabaseService { + val mutableMapOf = mutableMapOf( + Neo4jUtilsTest.KAFKA_BOOTSTRAP_SERVER to KafkaEventSinkSuiteIT.kafka.bootstrapServers, + APOC_KAFKA_ENABLED to "true", + "bootstrap.servers" to KafkaEventSinkSuiteIT.kafka.bootstrapServers, + "apoc.kafka.sink.enabled" to "true" + ) + + mutableMapOf.putAll(mapOf(*pairs)) + + dbms = DbmsTestUtil.startDbWithApocConfigs( + temporaryFolder, + mutableMapOf as Map? 
+ ) + return getDbServices() + } + + private fun KafkaProducer.flushAndClose() { + this.flush() + this.close() + } + + @After + @AfterEach + fun tearDown() { + dbms.shutdown() + + if (::kafkaProducer.isInitialized) { + kafkaProducer.flushAndClose() + } + if (::kafkaCustomProducer.isInitialized) { + kafkaCustomProducer.flushAndClose() + } + } + + private fun getDbServices(): GraphDatabaseService { + val db = dbms.database(GraphDatabaseSettings.DEFAULT_DATABASE_NAME) + TestUtil.registerProcedure(db, StreamsSinkProcedures::class.java, GlobalProcedures::class.java, PublishProcedures::class.java); + return db + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/consumer/kafka/KafkaEventSinkSuiteIT.kt b/extended/src/test/kotlin/apoc/kafka/consumer/kafka/KafkaEventSinkSuiteIT.kt new file mode 100644 index 0000000000..07d505b876 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/consumer/kafka/KafkaEventSinkSuiteIT.kt @@ -0,0 +1,101 @@ +package apoc.kafka.consumer.kafka + + +import org.testcontainers.containers.GenericContainer +import org.testcontainers.containers.KafkaContainer +import org.testcontainers.containers.Network +import org.testcontainers.containers.SocatContainer +import org.testcontainers.containers.wait.strategy.Wait +import java.util.stream.Stream + +import apoc.kafka.utils.KafkaUtil +import org.junit.AfterClass +import org.junit.Assume.assumeTrue +import org.junit.BeforeClass +import org.testcontainers.utility.DockerImageName + +class KafkaEventSinkSuiteIT { + + class SchemaRegistryContainer(version: String): GenericContainer("confluentinc/cp-schema-registry:$version") { + + private lateinit var proxy: SocatContainer + + override fun doStart() { + val networkAlias = networkAliases[0] + proxy = SocatContainer() + .withNetwork(network) + .withTarget(PORT, networkAlias) + + proxy.start() + super.doStart() + } + + fun withKafka(kafka: KafkaContainer): SchemaRegistryContainer? { + return kafka.network?.let { withKafka(it, kafka.networkAliases.map { "PLAINTEXT://$it:9092" }.joinToString(",")) } + } + + fun withKafka(network: Network, bootstrapServers: String): SchemaRegistryContainer { + withNetwork(network) + withEnv("SCHEMA_REGISTRY_HOST_NAME", "schema-registry") + withEnv("SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS", bootstrapServers) + return self() + } + + fun getSchemaRegistryUrl() = "http://${proxy.containerIpAddress}:${proxy.firstMappedPort}" + + override fun stop() { + Stream.of(Runnable { super.stop() }, Runnable { proxy.stop() }).parallel().forEach { it.run() } + } + + companion object { + @JvmStatic val PORT = 8081 + } + } + + companion object { + /** + * Kafka TestContainers uses Confluent OSS images. 
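+ * (This suite pins the images to 7.6.2 below; as an assumption based on Confluent's published
+ * compatibility matrix, the 7.6.x line ships Apache Kafka 3.6.x.)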
+ * We need to keep in mind which is the right Confluent Platform version for the Kafka version this project uses + * + * Confluent Platform | Apache Kafka + * | + * 4.0.x | 1.0.x + * 4.1.x | 1.1.x + * 5.0.x | 2.0.x + * + * Please see also https://docs.confluent.io/current/installation/versions-interoperability.html#cp-and-apache-kafka-compatibility + */ + private const val confluentPlatformVersion = "7.6.2" + @JvmStatic lateinit var kafka: KafkaContainer + @JvmStatic lateinit var schemaRegistry: SchemaRegistryContainer + + var isRunning = false + + @BeforeClass + @JvmStatic + fun setUpContainer() { + kafka = KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:7.6.2")) + .withNetwork(Network.newNetwork()) + kafka.start() + schemaRegistry = SchemaRegistryContainer(confluentPlatformVersion) + .withExposedPorts(8081) + .dependsOn(kafka) + .withKafka(kafka)!! + schemaRegistry.start() + isRunning = true + assumeTrue("Kafka must be running", ::kafka.isInitialized && kafka.isRunning) + assumeTrue("Schema Registry must be running", schemaRegistry.isRunning) + assumeTrue("isRunning must be true", isRunning) + } + + @AfterClass + @JvmStatic + fun tearDownContainer() { + KafkaUtil.ignoreExceptions({ + kafka.stop() + schemaRegistry.stop() + isRunning = false + }, UninitializedPropertyAccessException::class.java) + } + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/producer/RoutingConfigurationTest.kt b/extended/src/test/kotlin/apoc/kafka/producer/RoutingConfigurationTest.kt new file mode 100644 index 0000000000..45495a40d9 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/producer/RoutingConfigurationTest.kt @@ -0,0 +1,348 @@ +package apoc.kafka.producer + +import apoc.kafka.events.* +import apoc.kafka.producer.events.* +import org.junit.Test +import kotlin.test.assertEquals +import kotlin.test.assertFalse +import kotlin.test.assertTrue + +@Suppress("UNCHECKED_CAST") +class RoutingConfigurationTest { + + @Test + fun badPatternShouldThrowIllegalArgumentException() { + val topic = "topic1" + assertIllegalArgumentException(topic, "Label(1,2)", EntityType.node) + assertIllegalArgumentException(topic, "Label{}", EntityType.node) + assertIllegalArgumentException(topic, "KNOWS{}", EntityType.relationship) + } + + private fun assertIllegalArgumentException(topic: String, pattern: String, entityType: EntityType) { + var hasException = false + try { + RoutingConfigurationFactory.getRoutingConfiguration(topic, pattern, entityType) + } catch (e: Exception) { + assertTrue { e is IllegalArgumentException } + assertEquals("The pattern $pattern for topic $topic is invalid", e.message) + hasException = true + } + assertTrue { hasException } + } + + @Test + fun shouldCreateNodeRoutingConfiguration() { + var routing = RoutingConfigurationFactory.getRoutingConfiguration("topic1", "*", EntityType.node) as List + assertEquals(1, routing.size) + assertEquals("topic1", routing[0].topic) + assertTrue { routing[0].all } + assertTrue { routing[0].labels.isEmpty() } + assertTrue { routing[0].include.isEmpty() } + assertTrue { routing[0].exclude.isEmpty() } + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic2", "Label1:Label2{p1,p2}", EntityType.node) as List + assertEquals(1, routing.size) + assertEquals("topic2", routing[0].topic) + assertFalse { routing[0].all } + assertEquals(listOf("Label1","Label2"), routing[0].labels) + assertEquals(listOf("p1","p2"), routing[0].include) + assertTrue { routing[0].exclude.isEmpty() } + + routing = 
RoutingConfigurationFactory.getRoutingConfiguration("topic3.1", "Label1;Label2{ p1, p2}", EntityType.node) as List + assertEquals(2, routing.size) + assertEquals("topic3.1", routing[0].topic) + assertTrue { routing[0].all } + assertEquals(listOf("Label1"), routing[0].labels) + assertTrue { routing[0].include.isEmpty() } + assertTrue { routing[0].exclude.isEmpty() } + assertEquals("topic3.1", routing[1].topic) + assertFalse { routing[1].all } + assertEquals(listOf("Label2"), routing[1].labels) + assertEquals(listOf("p1","p2"), routing[1].include) + assertTrue { routing[1].exclude.isEmpty() } + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic4", "Label2{ -p1, -p2}", EntityType.node) as List + assertEquals(1, routing.size) + assertEquals("topic4", routing[0].topic) + assertFalse { routing[0].all } + assertEquals(listOf("Label2"), routing[0].labels) + assertTrue { routing[0].include.isEmpty() } + assertEquals(listOf("p1","p2"), routing[0].exclude) + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic5", "Label3{*}", EntityType.node) as List + assertEquals(1, routing.size) + assertEquals("topic5", routing[0].topic) + assertTrue { routing[0].all } + assertEquals(listOf("Label3"), routing[0].labels) + assertTrue { routing[0].include.isEmpty() } + assertTrue { routing[0].exclude.isEmpty() } + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic6", "Label4{ p1,p2, p3, p4}", EntityType.node) as List + assertEquals(1, routing.size) + assertEquals("topic6", routing[0].topic) + assertFalse { routing[0].all } + assertEquals(listOf("Label4"), routing[0].labels) + assertTrue { routing[0].exclude.isEmpty() } + assertEquals(listOf("p1","p2","p3","p4"), routing[0].include) + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic7", "Label:`labels::label`{ p1,p2, p3, p4}", EntityType.node) as List + assertEquals(1, routing.size) + assertEquals("topic7", routing[0].topic) + assertFalse { routing[0].all } + assertEquals(listOf("Label", "labels::label"), routing[0].labels) + assertTrue { routing[0].exclude.isEmpty() } + assertEquals(listOf("p1","p2","p3","p4"), routing[0].include) + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic8", " Label : ` lorem : ipsum : dolor : sit `{name, surname}", EntityType.node) as List + assertEquals(1, routing.size) + assertEquals("topic8", routing[0].topic) + assertFalse { routing[0].all } + assertEquals(listOf("Label", " lorem : ipsum : dolor : sit "), routing[0].labels) + assertTrue { routing[0].exclude.isEmpty() } + assertEquals(listOf("name","surname"), routing[0].include) + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic9", " `labels::label`:Label:Label1{name, surname}", EntityType.node) as List + assertEquals(1, routing.size) + assertEquals("topic9", routing[0].topic) + assertFalse { routing[0].all } + assertEquals(listOf("labels::label", "Label", "Label1"), routing[0].labels) + assertTrue { routing[0].exclude.isEmpty() } + assertEquals(listOf("name","surname"), routing[0].include) + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic10", ":Label:```labels::label```:Label1{one, two}", EntityType.node) as List + assertEquals(1, routing.size) + assertEquals("topic10", routing[0].topic) + assertFalse { routing[0].all } + assertEquals(listOf("Label", "labels::label", "Label1"), routing[0].labels) + assertTrue { routing[0].exclude.isEmpty() } + assertEquals(listOf("one","two"), routing[0].include) + + routing = 
RoutingConfigurationFactory.getRoutingConfiguration("topic11", ":Label:`labels::label`:`labels1::label1`:Label1{name, surname}", EntityType.node) as List + assertEquals(1, routing.size) + assertEquals("topic11", routing[0].topic) + assertFalse { routing[0].all } + assertEquals(listOf("Label", "labels::label", "labels1::label1", "Label1"), routing[0].labels) + assertTrue { routing[0].exclude.isEmpty() } + assertEquals(listOf("name","surname"), routing[0].include) + } + + @Test + fun shouldCreateRelationshipRoutingConfiguration() { + + + var routing = RoutingConfigurationFactory.getRoutingConfiguration("topic1", "*", EntityType.relationship) as List + assertEquals(1, routing.size) + assertEquals("topic1", routing[0].topic) + assertEquals(RelKeyStrategy.DEFAULT, routing[0].relKeyStrategy) + assertTrue { routing[0].all } + assertTrue { routing[0].name == "" } + assertTrue { routing[0].include.isEmpty() } + assertTrue { routing[0].exclude.isEmpty() } + + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic2", "KNOWS", EntityType.relationship) as List + assertEquals(1, routing.size) + assertEquals("topic2", routing[0].topic) + assertEquals(RelKeyStrategy.DEFAULT, routing[0].relKeyStrategy) + assertTrue { routing[0].all } + assertEquals("KNOWS",routing[0].name) + assertTrue { routing[0].include.isEmpty() } + assertTrue { routing[0].exclude.isEmpty() } + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic3", "KNOWS{*}", EntityType.relationship) as List + assertEquals(1, routing.size) + assertEquals("topic3", routing[0].topic) + assertEquals(RelKeyStrategy.DEFAULT, routing[0].relKeyStrategy) + assertTrue { routing[0].all } + assertEquals("KNOWS",routing[0].name) + assertTrue { routing[0].include.isEmpty() } + assertTrue { routing[0].exclude.isEmpty() } + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic4", "KNOWS;LOVES{p1, p2}", EntityType.relationship) as List + assertEquals(2, routing.size) + assertEquals("topic4", routing[0].topic) + assertEquals(RelKeyStrategy.DEFAULT, routing[0].relKeyStrategy) + assertTrue { routing[0].all } + assertEquals("KNOWS",routing[0].name) + assertTrue { routing[0].include.isEmpty() } + assertTrue { routing[0].exclude.isEmpty() } + assertEquals("topic4", routing[1].topic) + assertFalse { routing[1].all } + assertEquals("LOVES",routing[1].name) + assertEquals(listOf("p1","p2"),routing[1].include) + assertTrue { routing[1].exclude.isEmpty() } + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic5", "LOVES{-p1, -p2 }", EntityType.relationship) as List + assertEquals(1, routing.size) + assertEquals("topic5", routing[0].topic) + assertEquals(RelKeyStrategy.DEFAULT, routing[0].relKeyStrategy) + assertFalse { routing[0].all } + assertEquals("LOVES",routing[0].name) + assertTrue { routing[0].include.isEmpty() } + assertEquals(listOf("p1","p2"),routing[0].exclude) + + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic6", "`KNOWS::VERY:WELL`{one, -two }", EntityType.relationship) as List + assertEquals(1, routing.size) + assertEquals("topic6", routing[0].topic) + assertFalse { routing[0].all } + assertEquals("KNOWS::VERY:WELL",routing[0].name) + assertEquals(listOf("one"),routing[0].include) + assertEquals(listOf("two"),routing[0].exclude) + + // valid relKeyStrategy ALL + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic6", "KNOWS{*}", EntityType.relationship, RelKeyStrategy.ALL.toString().toLowerCase()) as List + assertEquals(1, routing.size) + 
assertEquals("topic6", routing[0].topic) + assertEquals(RelKeyStrategy.ALL, routing[0].relKeyStrategy) + assertTrue { routing[0].all } + assertEquals("KNOWS",routing[0].name) + assertTrue { routing[0].include.isEmpty() } + assertTrue { routing[0].exclude.isEmpty() } + + // valid relKeyStrategy DEFAULT + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic7", "LOVES{-p1, -p2 }", EntityType.relationship, RelKeyStrategy.DEFAULT.toString().toLowerCase()) as List + assertEquals(1, routing.size) + assertEquals("topic7", routing[0].topic) + assertEquals(RelKeyStrategy.DEFAULT, routing[0].relKeyStrategy) + assertFalse { routing[0].all } + assertEquals("LOVES",routing[0].name) + assertTrue { routing[0].include.isEmpty() } + assertEquals(listOf("p1","p2"),routing[0].exclude) + + // invalid relKeyStrategy + routing = RoutingConfigurationFactory.getRoutingConfiguration("topic8", "ANOTHER_ONE{-p1, -p2 }", EntityType.relationship, "Franco") as List + assertEquals(1, routing.size) + assertEquals("topic8", routing[0].topic) + assertEquals(RelKeyStrategy.DEFAULT, routing[0].relKeyStrategy) + assertFalse { routing[0].all } + assertEquals("ANOTHER_ONE",routing[0].name) + assertTrue { routing[0].include.isEmpty() } + assertEquals(listOf("p1","p2"),routing[0].exclude) + } + + @Test(expected = IllegalArgumentException::class) + fun multipleRelationshipsShouldThrowIllegalArgumentException() { + RoutingConfigurationFactory.getRoutingConfiguration("topic2", "KNOWS:FAILS", EntityType.relationship) + } + + @Test + fun shouldFilterAndRouteNodeEvents() { + // TODO add more tests like a Label removed + // Given + val payload = NodePayloadBuilder() + .withBefore(NodeChange(properties = mapOf("prop1" to 1, "prop2" to "pippo", "prop3" to 3), labels = listOf("Label1", "Label2"))) + .withAfter(NodeChange(properties = mapOf("prop1" to 1, "prop2" to "pippo", "prop3" to 3, "prop4" to 4), labels = listOf("Label1", "Label2", "Label3 :: Label4"))) + .build() + val streamsEvent = StreamsTransactionEventBuilder() + .withMeta( + StreamsEventMetaBuilder() + .withOperation(OperationType.created) + .withTimestamp(System.currentTimeMillis()) + .withTransactionEventId(1) + .withTransactionEventsCount(1) + .withUsername("user") + .withTransactionId(1) + .build()) + .withSchema(SchemaBuilder().withConstraints(emptySet()).withPayload(payload).build()) + .withPayload(payload) + .build() + + val routingConf = mutableListOf() + routingConf.addAll(RoutingConfigurationFactory.getRoutingConfiguration("topic2", "Label1:Label2{prop1, prop2}", EntityType.node) as List) + routingConf.addAll(RoutingConfigurationFactory.getRoutingConfiguration("topic3", "Label1{*}", EntityType.node) as List) + routingConf.addAll(RoutingConfigurationFactory.getRoutingConfiguration("topic4", "Label2{-prop1}", EntityType.node) as List) + val expectedTopics = setOf("topic3", "topic4", "topic2") + + //When + val events = NodeRoutingConfiguration.prepareEvent( + streamsTransactionEvent = streamsEvent, + routingConf = routingConf) + + // Then + assertEquals(3, events.size) + assertTrue { events.keys.containsAll(expectedTopics) } + + assertFalse { events["topic2"]!!.payload.before!!.properties!!.containsKey("prop3") } + assertFalse { events["topic2"]!!.payload.after!!.properties!!.containsKey("prop3") } + assertFalse { events["topic2"]!!.payload.after!!.properties!!.containsKey("prop4") } + var nodeBefore = events["topic2"]!!.payload.before as NodeChange + var nodeAfter = events["topic2"]!!.payload.after as NodeChange + assertTrue { 
nodeBefore.labels!!.toSet().containsAll(setOf("Label1", "Label2")) } + assertTrue { nodeAfter.labels!!.toSet().containsAll(setOf("Label1", "Label2")) } + + assertTrue { events["topic3"]!!.payload.before!!.properties!!.containsKey("prop1") } + assertTrue { events["topic3"]!!.payload.before!!.properties!!.containsKey("prop2") } + assertTrue { events["topic3"]!!.payload.before!!.properties!!.containsKey("prop3") } + assertTrue { events["topic3"]!!.payload.after!!.properties!!.containsKey("prop1") } + assertTrue { events["topic3"]!!.payload.after!!.properties!!.containsKey("prop2") } + assertTrue { events["topic3"]!!.payload.after!!.properties!!.containsKey("prop3") } + assertTrue { events["topic3"]!!.payload.after!!.properties!!.containsKey("prop4") } + nodeBefore = events["topic3"]!!.payload.before as NodeChange + nodeAfter = events["topic3"]!!.payload.after as NodeChange + assertTrue { nodeBefore.labels!!.toSet().containsAll(setOf("Label1", "Label2")) } + assertTrue { nodeAfter.labels!!.toSet().containsAll(setOf("Label1", "Label2")) } + + assertFalse { events["topic4"]!!.payload.before!!.properties!!.containsKey("prop1") } + assertFalse { events["topic4"]!!.payload.after!!.properties!!.containsKey("prop1") } + nodeBefore = events["topic4"]!!.payload.before as NodeChange + nodeAfter = events["topic4"]!!.payload.after as NodeChange + assertTrue { nodeBefore.labels!!.toSet().containsAll(setOf("Label1", "Label2")) } + assertTrue { nodeAfter.labels!!.toSet().containsAll(setOf("Label1", "Label2")) } + } + + @Test + fun shouldFilterAndRouteRelationshipEvents() { + // Given + val payload = RelationshipPayloadBuilder() + .withBefore(RelationshipChange(properties = mapOf("prop1" to 1, "prop2" to "pippo", "prop3" to 3))) + .withAfter(RelationshipChange(properties = mapOf("prop1" to 1, "prop2" to "pippo", "prop3" to 3, "prop4" to 4))) + .withStartNode("1", listOf("Label1", "Label2"), emptyMap()) + .withEndNode("2", listOf("Label1", "Label2"), emptyMap()) + .withName("KNOWS") + .build() + val streamsEvent = StreamsTransactionEventBuilder() + .withMeta(StreamsEventMetaBuilder() + .withOperation(OperationType.created) + .withTimestamp(System.currentTimeMillis()) + .withTransactionEventId(1) + .withTransactionEventsCount(1) + .withUsername("user") + .withTransactionId(1) + .build()) + .withSchema(SchemaBuilder().withConstraints(emptySet()).withPayload(payload).build()) + .withPayload(payload) + .build() + + val routingConf = mutableListOf() + routingConf.addAll(RoutingConfigurationFactory.getRoutingConfiguration("topic2", "KNOWS{prop1, prop2}", EntityType.relationship) as List) + routingConf.addAll(RoutingConfigurationFactory.getRoutingConfiguration("topic3", "KNOWS{*}", EntityType.relationship) as List) + routingConf.addAll(RoutingConfigurationFactory.getRoutingConfiguration("topic4", "KNOWS{-prop1}", EntityType.relationship) as List) + var expectedTopics = setOf("topic3", "topic4", "topic2") + + //When + val events = RelationshipRoutingConfiguration.prepareEvent( + streamsTransactionEvent = streamsEvent, + routingConf = routingConf) + + // Then + assertEquals(3, events.size) + assertTrue { events.keys.containsAll(expectedTopics) } + + assertFalse { events["topic2"]!!.payload.before!!.properties!!.containsKey("prop3") } + assertFalse { events["topic2"]!!.payload.after!!.properties!!.containsKey("prop3") } + assertFalse { events["topic2"]!!.payload.after!!.properties!!.containsKey("prop4") } + + assertTrue { events["topic3"]!!.payload.before!!.properties!!.containsKey("prop1") } + assertTrue { 
events["topic3"]!!.payload.before!!.properties!!.containsKey("prop2") } + assertTrue { events["topic3"]!!.payload.before!!.properties!!.containsKey("prop3") } + assertTrue { events["topic3"]!!.payload.after!!.properties!!.containsKey("prop1") } + assertTrue { events["topic3"]!!.payload.after!!.properties!!.containsKey("prop2") } + assertTrue { events["topic3"]!!.payload.after!!.properties!!.containsKey("prop3") } + assertTrue { events["topic3"]!!.payload.after!!.properties!!.containsKey("prop4") } + + assertFalse { events["topic4"]!!.payload.before!!.properties!!.containsKey("prop1") } + assertFalse { events["topic4"]!!.payload.after!!.properties!!.containsKey("prop1") } + + } + +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/producer/events/StreamsEventBuilderTest.kt b/extended/src/test/kotlin/apoc/kafka/producer/events/StreamsEventBuilderTest.kt new file mode 100644 index 0000000000..b39efc6d3d --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/producer/events/StreamsEventBuilderTest.kt @@ -0,0 +1,409 @@ +package apoc.kafka.producer.events + +import apoc.kafka.producer.NodeRoutingConfiguration +import apoc.kafka.producer.RelationshipRoutingConfiguration +import apoc.kafka.producer.toMap +import org.junit.Test +import org.mockito.Mockito +import org.neo4j.graphdb.* +import kotlin.test.assertEquals + +class StreamsEventBuilderTest { + + @Test + fun shouldCreateSimpleTypes() { + // Given + val payload = "Test" + + // When + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + .build() + + // Then + assertEquals(payload, result.payload) + } + + @Test + fun shouldCreateNode() { + // Given + val payload = Mockito.mock(Node::class.java) + Mockito.`when`(payload.id).thenReturn(1) + Mockito.`when`(payload.labels).thenReturn(listOf(Label.label("Foo"), Label.label("Bar"))) + Mockito.`when`(payload.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + + // When + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + .build() + + // Then + assertEquals(payload.toMap(), result.payload) + } + + @Test + fun shouldCreateNodeWithIncludedProperties() { + // Given + val nodeRouting = NodeRoutingConfiguration(all = false, labels = listOf("Foo"), include = listOf("prop1")) + val payload = Mockito.mock(Node::class.java) + Mockito.`when`(payload.id).thenReturn(1) + Mockito.`when`(payload.labels).thenReturn(listOf(Label.label("Foo"), Label.label("Bar"))) + Mockito.`when`(payload.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + + // When + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + .withNodeRoutingConfiguration(nodeRouting) + .build() + + // Then + val payloadAsMap = payload.toMap().toMutableMap() + payloadAsMap["properties"] = payload.allProperties.filter { nodeRouting.include.contains(it.key) } + val expected = payloadAsMap.toMap() + assertEquals(expected, result.payload) + } + + @Test + fun shouldCreateNodeWithoutExcludedProperties() { + // Given + val nodeRouting = NodeRoutingConfiguration(all = false, labels = listOf("Foo"), exclude = listOf("prop1")) + val payload = Mockito.mock(Node::class.java) + Mockito.`when`(payload.id).thenReturn(1) + Mockito.`when`(payload.labels).thenReturn(listOf(Label.label("Foo"), Label.label("Bar"))) + Mockito.`when`(payload.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + + // When + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + 
.withNodeRoutingConfiguration(nodeRouting) + .build() + + // Then + val payloadAsMap = payload.toMap().toMutableMap() + payloadAsMap["properties"] = payload.allProperties.filter { !nodeRouting.exclude.contains(it.key) } + val expected = payloadAsMap.toMap() + assertEquals(expected, result.payload) + } + + @Test + fun shouldCreateRelationship() { + // Given + val mockedStartNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedStartNode.id).thenReturn(1) + Mockito.`when`(mockedStartNode.labels).thenReturn(listOf(Label.label("Foo"), Label.label("Bar"))) + Mockito.`when`(mockedStartNode.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + val mockedEndNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedEndNode.id).thenReturn(2) + Mockito.`when`(mockedEndNode.labels).thenReturn(listOf(Label.label("FooEnd"), Label.label("BarEnd"))) + Mockito.`when`(mockedEndNode.allProperties).thenReturn(mapOf("prop" to "fooEnd", "prop1" to "barEnd")) + val payload = Mockito.mock(Relationship::class.java) + Mockito.`when`(payload.id).thenReturn(10) + Mockito.`when`(payload.type).thenReturn(RelationshipType.withName("KNOWS")) + Mockito.`when`(payload.startNode).thenReturn(mockedStartNode) + Mockito.`when`(payload.endNode).thenReturn(mockedEndNode) + Mockito.`when`(payload.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + + // When + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + .build() + + // Then + assertEquals(payload.toMap(), result.payload) + } + + @Test + fun shouldCreateRelationshipWithIncludedProperties() { + // Given + val relRouting = RelationshipRoutingConfiguration(all = false, name = "KNOWS", include = listOf("prop1")) + val mockedStartNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedStartNode.id).thenReturn(1) + Mockito.`when`(mockedStartNode.labels).thenReturn(listOf(Label.label("Foo"), Label.label("Bar"))) + Mockito.`when`(mockedStartNode.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + val mockedEndNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedEndNode.id).thenReturn(2) + Mockito.`when`(mockedEndNode.labels).thenReturn(listOf(Label.label("FooEnd"), Label.label("BarEnd"))) + Mockito.`when`(mockedEndNode.allProperties).thenReturn(mapOf("prop" to "fooEnd", "prop1" to "barEnd")) + val payload = Mockito.mock(Relationship::class.java) + Mockito.`when`(payload.id).thenReturn(10) + Mockito.`when`(payload.type).thenReturn(RelationshipType.withName("KNOWS")) + Mockito.`when`(payload.startNode).thenReturn(mockedStartNode) + Mockito.`when`(payload.endNode).thenReturn(mockedEndNode) + Mockito.`when`(payload.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + + // When + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + .withRelationshipRoutingConfiguration(relRouting) + .build() + + // Then + val payloadAsMap = payload.toMap().toMutableMap() + payloadAsMap["properties"] = payload.allProperties.filter { relRouting.include.contains(it.key) } + assertEquals(payloadAsMap.toMap(), result.payload) + } + + @Test + fun shouldCreateRelationshipWithoutExcludedProperties() { + // Given + val relRouting = RelationshipRoutingConfiguration(all = false, name = "KNOWS", exclude = listOf("prop1")) + val mockedStartNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedStartNode.id).thenReturn(1) + Mockito.`when`(mockedStartNode.labels).thenReturn(listOf(Label.label("Foo"), Label.label("Bar"))) + 
Mockito.`when`(mockedStartNode.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + val mockedEndNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedEndNode.id).thenReturn(2) + Mockito.`when`(mockedEndNode.labels).thenReturn(listOf(Label.label("FooEnd"), Label.label("BarEnd"))) + Mockito.`when`(mockedEndNode.allProperties).thenReturn(mapOf("prop" to "fooEnd", "prop1" to "barEnd")) + val payload = Mockito.mock(Relationship::class.java) + Mockito.`when`(payload.id).thenReturn(10) + Mockito.`when`(payload.type).thenReturn(RelationshipType.withName("KNOWS")) + Mockito.`when`(payload.startNode).thenReturn(mockedStartNode) + Mockito.`when`(payload.endNode).thenReturn(mockedEndNode) + Mockito.`when`(payload.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + + // When + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + .withRelationshipRoutingConfiguration(relRouting) + .build() + + // Then + val payloadAsMap = payload.toMap().toMutableMap() + payloadAsMap["properties"] = payload.allProperties.filter { !relRouting.exclude.contains(it.key) } + assertEquals(payloadAsMap.toMap(), result.payload) + } + + @Test + fun shouldReturnSimpleMap() { + // Given + val payload = mapOf("foo" to "bar", "bar" to 10, "prop" to listOf(1, "two", null, mapOf("foo" to "bar"))) + + // When + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + .build() + + // Then + assertEquals(payload, result.payload) + } + + @Test + fun shouldReturnSimpleList() { + // Given + val payload = listOf("3", 2, 1, mapOf("foo" to "bar", "bar" to 10, "prop" to listOf(1, "two", null, mapOf("foo" to "bar")))) + + // When + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + .build() + + // Then + assertEquals(payload, result.payload) + } + + @Test + fun shouldReturnMapWithComplexTypes() { + // Given + val mockedStartNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedStartNode.id).thenReturn(1) + Mockito.`when`(mockedStartNode.labels).thenReturn(listOf(Label.label("Foo"), Label.label("Bar"))) + Mockito.`when`(mockedStartNode.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + val mockedEndNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedEndNode.id).thenReturn(2) + Mockito.`when`(mockedEndNode.labels).thenReturn(listOf(Label.label("FooEnd"), Label.label("BarEnd"))) + Mockito.`when`(mockedEndNode.allProperties).thenReturn(mapOf("prop" to "fooEnd", "prop1" to "barEnd")) + val relationship = Mockito.mock(Relationship::class.java) + Mockito.`when`(relationship.id).thenReturn(10) + Mockito.`when`(relationship.type).thenReturn(RelationshipType.withName("KNOWS")) + Mockito.`when`(relationship.startNode).thenReturn(mockedStartNode) + Mockito.`when`(relationship.endNode).thenReturn(mockedEndNode) + Mockito.`when`(relationship.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + + val node = Mockito.mock(Node::class.java) + Mockito.`when`(mockedStartNode.id).thenReturn(10) + Mockito.`when`(mockedStartNode.labels).thenReturn(listOf(Label.label("FooNode"), Label.label("BarNode"))) + Mockito.`when`(mockedStartNode.allProperties).thenReturn(mapOf("prop" to "fooNode", "prop1" to "barNode")) + + val payload = mapOf("node" to node, + "relationship" to relationship, + "prop" to listOf(1, "two", null, mapOf("foo" to "bar"))) + + // When + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + .build() + + // Then + val payloadAsMutableMap = 
payload.toMutableMap() + payloadAsMutableMap["node"] = (payloadAsMutableMap["node"] as Node).toMap() + payloadAsMutableMap["relationship"] = (payloadAsMutableMap["relationship"] as Relationship).toMap() + assertEquals(payloadAsMutableMap.toMap(), result.payload) + } + + @Test + fun shouldReturnMapWithComplexTypesFiltered() { + // Given + val nodeRouting = NodeRoutingConfiguration(all = false, labels = listOf("Foo"), include = listOf("prop1")) + val relRouting = RelationshipRoutingConfiguration(all = false, name = "KNOWS", include = listOf("prop1")) + val mockedStartNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedStartNode.id).thenReturn(1) + Mockito.`when`(mockedStartNode.labels).thenReturn(listOf(Label.label("Foo"), Label.label("Bar"))) + Mockito.`when`(mockedStartNode.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + val mockedEndNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedEndNode.id).thenReturn(2) + Mockito.`when`(mockedEndNode.labels).thenReturn(listOf(Label.label("FooEnd"), Label.label("BarEnd"))) + Mockito.`when`(mockedEndNode.allProperties).thenReturn(mapOf("prop" to "fooEnd", "prop1" to "barEnd")) + val relationship = Mockito.mock(Relationship::class.java) + Mockito.`when`(relationship.id).thenReturn(10) + Mockito.`when`(relationship.type).thenReturn(RelationshipType.withName("KNOWS")) + Mockito.`when`(relationship.startNode).thenReturn(mockedStartNode) + Mockito.`when`(relationship.endNode).thenReturn(mockedEndNode) + Mockito.`when`(relationship.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + + val node = Mockito.mock(Node::class.java) + Mockito.`when`(mockedStartNode.id).thenReturn(10) + Mockito.`when`(mockedStartNode.labels).thenReturn(listOf(Label.label("FooNode"), Label.label("BarNode"))) + Mockito.`when`(mockedStartNode.allProperties).thenReturn(mapOf("prop" to "fooNode", "prop1" to "barNode")) + + val payload = mapOf("node" to node, + "relationship" to relationship, + "prop" to listOf(1, "two", null, mapOf("foo" to "bar"))) + + // When + val resultNode = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(node) + .withNodeRoutingConfiguration(nodeRouting) + .build() + val resultRelationship = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(relationship) + .withRelationshipRoutingConfiguration(relRouting) + .build() + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + .withRelationshipRoutingConfiguration(relRouting) + .withNodeRoutingConfiguration(nodeRouting) + .build() + + // Then + val payloadAsMutableMap = payload.toMutableMap() + payloadAsMutableMap["node"] = resultNode.payload + payloadAsMutableMap["relationship"] = resultRelationship.payload + assertEquals(payloadAsMutableMap.toMap(), result.payload) + } + + @Test + fun shouldReturnPath() { + // Given + val mockedStartNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedStartNode.id).thenReturn(1) + Mockito.`when`(mockedStartNode.labels).thenReturn(listOf(Label.label("Foo"), Label.label("Bar"))) + Mockito.`when`(mockedStartNode.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + val mockedEndNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedEndNode.id).thenReturn(2) + Mockito.`when`(mockedEndNode.labels).thenReturn(listOf(Label.label("FooEnd"), Label.label("BarEnd"))) + Mockito.`when`(mockedEndNode.allProperties).thenReturn(mapOf("prop" to "fooEnd", "prop1" to "barEnd")) + val relationship = Mockito.mock(Relationship::class.java) + 
Mockito.`when`(relationship.id).thenReturn(10) + Mockito.`when`(relationship.type).thenReturn(RelationshipType.withName("KNOWS")) + Mockito.`when`(relationship.startNode).thenReturn(mockedStartNode) + Mockito.`when`(relationship.endNode).thenReturn(mockedEndNode) + Mockito.`when`(relationship.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + val payload = Mockito.mock(Path::class.java) + Mockito.`when`(payload.relationships()).thenReturn(listOf(relationship)) + Mockito.`when`(payload.nodes()).thenReturn(listOf(mockedStartNode, mockedEndNode)) + Mockito.`when`(payload.startNode()).thenReturn(mockedStartNode) + Mockito.`when`(payload.endNode()).thenReturn(mockedEndNode) + Mockito.`when`(payload.length()).thenReturn(1) + + // When + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + .build() + + // Then + val nodes = payload.nodes().map { StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(it) + .build() + .payload + } + val rels = payload.relationships().map { StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(it) + .build() + .payload + } + val expectedPath = mapOf("length" to 1, "nodes" to nodes, "rels" to rels) + assertEquals(expectedPath, result.payload) + } + + @Test + fun shouldReturnPathWithFilteredProperties() { + // Given + val nodeRouting = NodeRoutingConfiguration(all = false, labels = listOf("Foo"), include = listOf("prop1")) + val relRouting = RelationshipRoutingConfiguration(all = false, name = "KNOWS", include = listOf("prop1")) + val mockedStartNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedStartNode.id).thenReturn(1) + Mockito.`when`(mockedStartNode.labels).thenReturn(listOf(Label.label("Foo"), Label.label("Bar"))) + Mockito.`when`(mockedStartNode.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + val mockedEndNode = Mockito.mock(Node::class.java) + Mockito.`when`(mockedEndNode.id).thenReturn(2) + Mockito.`when`(mockedEndNode.labels).thenReturn(listOf(Label.label("FooEnd"), Label.label("BarEnd"))) + Mockito.`when`(mockedEndNode.allProperties).thenReturn(mapOf("prop" to "fooEnd", "prop1" to "barEnd")) + val relationship = Mockito.mock(Relationship::class.java) + Mockito.`when`(relationship.id).thenReturn(10) + Mockito.`when`(relationship.type).thenReturn(RelationshipType.withName("KNOWS")) + Mockito.`when`(relationship.startNode).thenReturn(mockedStartNode) + Mockito.`when`(relationship.endNode).thenReturn(mockedEndNode) + Mockito.`when`(relationship.allProperties).thenReturn(mapOf("prop" to "foo", "prop1" to "bar")) + val payload = Mockito.mock(Path::class.java) + Mockito.`when`(payload.relationships()).thenReturn(listOf(relationship)) + Mockito.`when`(payload.nodes()).thenReturn(listOf(mockedStartNode, mockedEndNode)) + Mockito.`when`(payload.startNode()).thenReturn(mockedStartNode) + Mockito.`when`(payload.endNode()).thenReturn(mockedEndNode) + Mockito.`when`(payload.length()).thenReturn(1) + + // When + val result = StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(payload) + .withRelationshipRoutingConfiguration(relRouting) + .withNodeRoutingConfiguration(nodeRouting) + .build() + + // Then + val nodes = payload.nodes().map { StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(it) + .withNodeRoutingConfiguration(nodeRouting) + .build() + .payload + } + val rels = payload.relationships().map { StreamsEventBuilder() + .withTopic("neo4j") + .withPayload(it) + .withRelationshipRoutingConfiguration(relRouting) + .build() + .payload + } + val expectedPath = 
mapOf("length" to 1, "nodes" to nodes, "rels" to rels) + assertEquals(expectedPath, result.payload) + } + +} + diff --git a/extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterBaseTSE.kt b/extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterBaseTSE.kt new file mode 100644 index 0000000000..4e997f418b --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterBaseTSE.kt @@ -0,0 +1,78 @@ +package apoc.kafka.producer.integrations + +import apoc.ExtendedApocConfig.APOC_KAFKA_ENABLED +import apoc.kafka.common.support.KafkaTestUtils +import apoc.kafka.common.support.KafkaTestUtils.getDbServices +import apoc.kafka.common.utils.Neo4jUtilsTest +import apoc.util.DbmsTestUtil +import org.apache.kafka.clients.consumer.KafkaConsumer +import org.junit.* +import org.junit.jupiter.api.AfterEach +import org.junit.jupiter.api.BeforeEach +import org.junit.rules.TemporaryFolder +import org.neo4j.dbms.api.DatabaseManagementService +import org.neo4j.graphdb.GraphDatabaseService + +open class KafkaEventRouterBaseTSE { // TSE (Test Suit Element) + + companion object { + + private var startedFromSuite = true + lateinit var db: GraphDatabaseService + lateinit var dbms: DatabaseManagementService + + @BeforeClass + @JvmStatic + fun setUpContainer() { + if (!KafkaEventRouterSuiteIT.isRunning) { + startedFromSuite = false + KafkaEventRouterSuiteIT.setUpContainer() + } + } + + @AfterClass + @JvmStatic + fun tearDownContainer() { + if (!startedFromSuite) { + KafkaEventRouterSuiteIT.tearDownContainer() + } + } + } + + lateinit var kafkaConsumer: KafkaConsumer + + @JvmField + @Rule + var temporaryFolder = TemporaryFolder() + + @Before + @BeforeEach + fun setUp() { + kafkaConsumer = KafkaTestUtils.createConsumer(bootstrapServers = KafkaEventRouterSuiteIT.kafka.bootstrapServers) + } + + + @After + @AfterEach + fun tearDown() { + dbms.shutdown() + kafkaConsumer.close() + } + + fun createDbWithKafkaConfigs(vararg pairs: Pair) : GraphDatabaseService { + val mutableMapOf = mutableMapOf( + APOC_KAFKA_ENABLED to "true", + Neo4jUtilsTest.KAFKA_BOOTSTRAP_SERVER to KafkaEventRouterSuiteIT.kafka.bootstrapServers + ) + + mutableMapOf.putAll(mapOf(*pairs)) + + + dbms = DbmsTestUtil.startDbWithApocConfigs( + temporaryFolder, + mutableMapOf + ) + + return getDbServices(dbms) + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterProcedureTSE.kt b/extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterProcedureTSE.kt new file mode 100644 index 0000000000..969e10ad3a --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterProcedureTSE.kt @@ -0,0 +1,289 @@ +package apoc.kafka.producer.integrations + +import apoc.kafka.events.StreamsEvent +import apoc.kafka.extensions.execute +// import apoc.kafka.support.start +import apoc.kafka.utils.JSONUtils +import apoc.util.ExtendedTestUtil +import org.apache.kafka.clients.admin.AdminClient +import org.apache.kafka.clients.admin.NewTopic +import org.junit.Test +import org.neo4j.graphdb.QueryExecutionException +import org.neo4j.graphdb.Result +import java.util.* +import kotlin.test.assertEquals +import kotlin.test.assertFailsWith +import kotlin.test.assertFalse +import kotlin.test.assertTrue +import kotlin.test.assertNotNull + +class KafkaEventRouterProcedureTSE : KafkaEventRouterBaseTSE() { + + @Test + fun testProcedure() { + val db = createDbWithKafkaConfigs() + + val topic = 
UUID.randomUUID().toString() + kafkaConsumer.subscribe(listOf(topic)) + val message = "Hello World" + db.execute("CALL apoc.kafka.publish('$topic', '$message')") + val records = kafkaConsumer.poll(5000) + assertEquals(1, records.count()) + assertTrue { records.all { + JSONUtils.readValue(it.value(), StreamsEvent::class.java).let { + message == it.payload + } + }} + } + + @Test + fun testProcedureWithKey() { + val db = createDbWithKafkaConfigs() + val topic = UUID.randomUUID().toString() + kafkaConsumer.subscribe(listOf(topic)) + val message = "Hello World" + val keyRecord = "test" + db.execute("CALL apoc.kafka.publish('$topic', '$message', {key: '$keyRecord'} )") + val records = kafkaConsumer.poll(5000) + assertEquals(1, records.count()) + assertTrue { records.all { + JSONUtils.readValue(it.value(), StreamsEvent::class.java).payload == message + && ExtendedTestUtil.readValue(it.key()) == keyRecord + }} + } + + @Test + fun testProcedureWithKeyAsMap() { + val db = createDbWithKafkaConfigs() + val topic = UUID.randomUUID().toString() + kafkaConsumer.subscribe(listOf(topic)) + val message = "Hello World" + val keyRecord = mapOf("one" to "Foo", "two" to "Baz", "three" to "Bar") + db.execute("CALL apoc.kafka.publish('$topic', '$message', {key: \$key } )", mapOf("key" to keyRecord)) + val records = kafkaConsumer.poll(5000) + assertEquals(1, records.count()) + assertTrue { records.all { + JSONUtils.readValue(it.value(), StreamsEvent::class.java).payload == message + }} + } + + @Test + fun testProcedureWithPartitionAsNotNumber() { + val db = createDbWithKafkaConfigs() + // db.start() + val topic = UUID.randomUUID().toString() + kafkaConsumer.subscribe(listOf(topic)) + val message = "Hello World" + val keyRecord = "test" + val partitionRecord = "notNumber" + assertFailsWith(QueryExecutionException::class) { + db.execute("CALL apoc.kafka.publish('$topic', '$message', {key: '$keyRecord', partition: '$partitionRecord' })") + } + } + + @Test + fun testProcedureWithPartitionAndKey() { + val db = createDbWithKafkaConfigs() + // db.start() + val topic = UUID.randomUUID().toString() + kafkaConsumer.subscribe(listOf(topic)) + val message = "Hello World" + val keyRecord = "test" + val partitionRecord = 0 + db.execute("CALL apoc.kafka.publish('$topic', '$message', {key: '$keyRecord', partition: $partitionRecord })") + val records = kafkaConsumer.poll(5000) + assertEquals(1, records.count()) + assertTrue{ records.all { + JSONUtils.readValue(it.value(), StreamsEvent::class.java).payload == message + && ExtendedTestUtil.readValue(it.key()) == keyRecord + && partitionRecord == it.partition() + }} + } + + @Test + fun testCantPublishNull() { + val db = createDbWithKafkaConfigs() + setUpProcedureTests() + assertFailsWith(RuntimeException::class) { + db.execute("CALL apoc.kafka.publish('neo4j', null)") + } + } + + @Test + fun testProcedureSyncWithNode() { + val db = createDbWithKafkaConfigs() + setUpProcedureTests() + db.execute("CREATE (n:Baz {age: 23, name: 'Foo', surname: 'Bar'})") + + db.execute("MATCH (n:Baz) \n" + + "CALL apoc.kafka.publish.sync('neo4j', n) \n" + + "YIELD value \n" + + "RETURN value") { + assertSyncResult(it) + } + + val records = kafkaConsumer.poll(5000) + assertEquals(1, records.count()) + assertEquals(3, ((records.map { + JSONUtils.readValue(it.value(), StreamsEvent::class.java).payload + }[0] as Map)["properties"] as Map).size) + } + + @Test + fun testProcedureSync() { + val db = createDbWithKafkaConfigs() + setUpProcedureTests() + val message = "Hello World" + db.execute("CALL 
apoc.kafka.publish.sync('neo4j', '$message')") { + assertSyncResult(it) + } + + val records = kafkaConsumer.poll(5000) + assertEquals(1, records.count()) + assertTrue { records.all { + JSONUtils.readValue(it.value(), StreamsEvent::class.java).payload == message + }} + } + + + @Test + fun testProcedureWithRelationship() { + val db = createDbWithKafkaConfigs() + setUpProcedureTests() + db.execute("CREATE (:Foo {one: 'two'})-[:KNOWS {alpha: 'beta'}]->(:Bar {three: 'four'})") + + db.execute(""" + MATCH (:Foo)-[r:KNOWS]->(:Bar) + |CALL apoc.kafka.publish.sync('neo4j', r) + |YIELD value RETURN value""".trimMargin()) { + assertSyncResult(it) + } + val records = kafkaConsumer.poll(5000) + assertEquals(1, records.count()) + + val payload = JSONUtils.readValue(records.first().value(), StreamsEvent::class.java).payload as Map + assertTrue(payload["id"] is String) + assertEquals(mapOf("alpha" to "beta"), payload["properties"]) + assertEquals("KNOWS", payload["label"]) + assertEquals("relationship", payload["type"]) + val start = payload["start"] as Map + assertEquals(listOf("Foo"), start["labels"]) + assertEquals(mapOf("one" to "two"), start["properties"]) + assertEquals("node", start["type"]) + val end = payload["end"] as Map + assertEquals(listOf("Bar"), end["labels"]) + assertEquals(mapOf("three" to "four"), end["properties"]) + assertEquals("node", end["type"]) + } + + @Test + fun testProcedureSyncWithKeyNull() { + val db = createDbWithKafkaConfigs() + setUpProcedureTests() + db.execute("CREATE (n:Foo {id: 1, name: 'Bar'})") + + val message = "Hello World" + db.execute("MATCH (n:Foo {id: 1}) CALL apoc.kafka.publish.sync('neo4j', '$message', {key: n.foo}) YIELD value RETURN value") { + assertSyncResult(it) + } + + val records = kafkaConsumer.poll(5000) + assertEquals(1, records.count()) + assertTrue { records.all { + JSONUtils.readValue(it.value(), StreamsEvent::class.java).payload == message + && it.key() == null + }} + } + + @Test + fun testProcedureSyncWithConfig() { + val db = createDbWithKafkaConfigs() + AdminClient.create(mapOf("bootstrap.servers" to KafkaEventRouterSuiteIT.kafka.bootstrapServers)).use { + val topic = UUID.randomUUID().toString() + + it.createTopics(listOf(NewTopic(topic, 5, 1))) + .all() + .get() + kafkaConsumer.subscribe(listOf(topic)) + + val message = "Hello World" + val keyRecord = "test" + val partitionRecord = 1 + db.execute("CALL apoc.kafka.publish.sync('$topic', '$message', {key: '$keyRecord', partition: $partitionRecord })") { + assertSyncResult(it) + } + + val records = kafkaConsumer.poll(5000) + assertEquals(1, records.count()) + assertEquals(1, records.count { it.partition() == 1 }) + assertTrue{ records.all { + JSONUtils.readValue(it.value(), StreamsEvent::class.java).payload == message + && ExtendedTestUtil.readValue(it.key()) == keyRecord + && partitionRecord == it.partition() + }} + } + } + + @Test + fun testProcedureWithTopicWithMultiplePartitionAndKey() { + val db = createDbWithKafkaConfigs() + AdminClient.create(mapOf("bootstrap.servers" to KafkaEventRouterSuiteIT.kafka.bootstrapServers)).use { + val topic = UUID.randomUUID().toString() + + it.createTopics(listOf(NewTopic(topic, 3, 1))) + .all() + .get() + kafkaConsumer.subscribe(listOf(topic)) + + val message = "Hello World" + val keyRecord = "test" + val partitionRecord = 2 + db.execute("CALL apoc.kafka.publish('$topic', '$message', {key: '$keyRecord', partition: $partitionRecord })") + + val records = kafkaConsumer.poll(5000) + assertEquals(1, records.count()) + assertEquals(1, records.count { 
it.partition() == 2 }) + assertTrue{ records.all { + JSONUtils.readValue(it.value(), StreamsEvent::class.java).payload == message + && ExtendedTestUtil.readValue(it.key()) == keyRecord + && partitionRecord == it.partition() + }} + } + } + + @Test + fun testProcedureSendMessageToNotExistentPartition() { + val db = createDbWithKafkaConfigs() + AdminClient.create(mapOf("bootstrap.servers" to KafkaEventRouterSuiteIT.kafka.bootstrapServers)).use { + val topic = UUID.randomUUID().toString() + + it.createTopics(listOf(NewTopic(topic, 3, 1))) + .all() + .get() + kafkaConsumer.subscribe(listOf(topic)) + + val message = "Hello World" + val keyRecord = "test" + val partitionRecord = 9 + db.execute("CALL apoc.kafka.publish('$topic', '$message', {key: '$keyRecord', partition: $partitionRecord })") + + val records = kafkaConsumer.poll(5000) + assertEquals(0, records.count()) + } + } + + private fun setUpProcedureTests() { + kafkaConsumer.subscribe(listOf("neo4j")) + } + + private fun assertSyncResult(it: Result) { + assertTrue { it.hasNext() } + val resultMap = (it.next())["value"] as Map + assertNotNull(resultMap["offset"]) + assertNotNull(resultMap["partition"]) + assertNotNull(resultMap["keySize"]) + assertNotNull(resultMap["valueSize"]) + assertNotNull(resultMap["timestamp"]) + assertFalse { it.hasNext() } + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterSuiteIT.kt b/extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterSuiteIT.kt new file mode 100644 index 0000000000..e04bf40246 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterSuiteIT.kt @@ -0,0 +1,53 @@ +package apoc.kafka.producer.integrations + +import apoc.kafka.utils.KafkaUtil +import org.junit.AfterClass +import org.junit.Assume +import org.junit.BeforeClass +import org.neo4j.graphdb.GraphDatabaseService +import org.testcontainers.containers.KafkaContainer +import org.testcontainers.containers.Network + +class KafkaEventRouterSuiteIT { + + companion object { + /** + * Kafka TestContainers uses Confluent OSS images. 
+ * We need to keep in mind which is the right Confluent Platform version for the Kafka version this project uses + * + * Confluent Platform | Apache Kafka + * | + * 4.0.x | 1.0.x + * 4.1.x | 1.1.x + * 5.0.x | 2.0.x + * + * Please see also https://docs.confluent.io/current/installation/versions-interoperability.html#cp-and-apache-kafka-compatibility + */ + private const val confluentPlatformVersion = "5.3.1-1" + @JvmStatic + lateinit var kafka: KafkaContainer + + var isRunning = false + + @BeforeClass @JvmStatic + fun setUpContainer() { + var exists = false + KafkaUtil.ignoreExceptions({ + kafka = KafkaContainer(confluentPlatformVersion) + .withNetwork(Network.newNetwork()) + kafka.start() + exists = true + }, IllegalStateException::class.java) + Assume.assumeTrue("Kafka container has to exist", exists) + Assume.assumeTrue("Kafka must be running", Companion::kafka.isInitialized && kafka.isRunning) + } + + @AfterClass @JvmStatic + fun tearDownContainer() { + KafkaUtil.ignoreExceptions({ + kafka.stop() + }, UninitializedPropertyAccessException::class.java) + } + } + +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterTestCommon.kt b/extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterTestCommon.kt new file mode 100644 index 0000000000..8407423b7a --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/producer/integrations/KafkaEventRouterTestCommon.kt @@ -0,0 +1,53 @@ +package apoc.kafka.producer.integrations + +import apoc.kafka.extensions.execute +import apoc.kafka.common.support.Assert +import org.apache.kafka.clients.admin.AdminClient +import org.apache.kafka.clients.admin.NewTopic +import org.apache.kafka.clients.consumer.ConsumerRecords +import org.apache.kafka.clients.consumer.KafkaConsumer +import org.hamcrest.Matchers +import org.neo4j.function.ThrowingSupplier +import org.neo4j.graphdb.GraphDatabaseService +import java.time.Duration +import java.util.concurrent.TimeUnit + +object KafkaEventRouterTestCommon { + + private fun createTopic(topic: String, numTopics: Int, withCompact: Boolean) = run { + val newTopic = NewTopic(topic, numTopics, 1) + if (withCompact) { + newTopic.configs(mapOf( + "cleanup.policy" to "compact", + "segment.ms" to "10", + "retention.ms" to "1", + "min.cleanable.dirty.ratio" to "0.01")) + } + newTopic + } + + fun createTopic(topic: String, bootstrapServerMap: Map, numTopics: Int = 1, withCompact: Boolean = true) { + AdminClient.create(bootstrapServerMap).use { + val topics = listOf(createTopic(topic, numTopics, withCompact)) + it.createTopics(topics).all().get() + } + } + + fun assertTopicFilled(kafkaConsumer: KafkaConsumer, + fromBeginning: Boolean = false, + timeout: Long = 30, + assertion: (ConsumerRecords) -> Boolean = { it.count() == 1 } + ) { + Assert.assertEventually(ThrowingSupplier { + if(fromBeginning) { + kafkaConsumer.seekToBeginning(kafkaConsumer.assignment()) + } + val records = kafkaConsumer.poll(Duration.ofSeconds(5)) + assertion(records) + }, Matchers.equalTo(true), timeout, TimeUnit.SECONDS) + } + + fun initDbWithLogStrategy(db: GraphDatabaseService, strategy: String, otherConfigs: Map? = null, constraints: List? 
= null) { + constraints?.forEach { db.execute(it) } + } +} diff --git a/extended/src/test/kotlin/apoc/kafka/producer/kafka/KafkaConfigurationTest.kt b/extended/src/test/kotlin/apoc/kafka/producer/kafka/KafkaConfigurationTest.kt new file mode 100644 index 0000000000..ba89331945 --- /dev/null +++ b/extended/src/test/kotlin/apoc/kafka/producer/kafka/KafkaConfigurationTest.kt @@ -0,0 +1,52 @@ +package apoc.kafka.producer.kafka + +import apoc.kafka.common.utils.Neo4jUtilsTest +import org.junit.Test +import kotlin.test.assertEquals +import kotlin.test.assertFalse +import kotlin.test.assertTrue + +class KafkaConfigurationTest { + + @Test + fun shouldCreateConfiguration() { + val map = mapOf( + Neo4jUtilsTest.KAFKA_BOOTSTRAP_SERVER to "kafka:5678", + "apoc.kafka.acks" to "10", + "apoc.kafka.retries" to 1, + "apoc.kafka.batch.size" to 10, + "apoc.kafka.buffer.memory" to 1000, + "apoc.kafka.reindex.batch.size" to 1, + "apoc.kafka.session.timeout.ms" to 1, + "apoc.kafka.connection.timeout.ms" to 1, + "apoc.kafka.replication" to 2, + "apoc.kafka.transactional.id" to "foo", + "apoc.kafka.linger.ms" to 10, + "apoc.kafka.fetch.min.bytes" to 1234, + "apoc.kafka.topic.discovery.polling.interval" to 0L, + "apoc.kafka.log.compaction.strategy" to "delete") + + val kafkaConfig = KafkaConfiguration.create(map.mapValues { it.value.toString() }) + + assertFalse { kafkaConfig.extraProperties.isEmpty() } + assertTrue { kafkaConfig.extraProperties.containsKey("fetch.min.bytes") } + assertEquals(1, kafkaConfig.extraProperties.size) + + val properties = kafkaConfig.asProperties() + + assertEquals(map[Neo4jUtilsTest.KAFKA_BOOTSTRAP_SERVER], properties["bootstrap.servers"]) + assertEquals(map["apoc.kafka.acks"], properties["acks"]) + assertEquals(map["apoc.kafka.retries"], properties["retries"]) + assertEquals(map["apoc.kafka.batch.size"], properties["batch.size"]) + assertEquals(map["apoc.kafka.buffer.memory"], properties["buffer.memory"]) + assertEquals(map["apoc.kafka.reindex.batch.size"], properties["reindex.batch.size"]) + assertEquals(map["apoc.kafka.session.timeout.ms"], properties["session.timeout.ms"]) + assertEquals(map["apoc.kafka.connection.timeout.ms"], properties["connection.timeout.ms"]) + assertEquals(map["apoc.kafka.replication"], properties["replication"]) + assertEquals(map["apoc.kafka.transactional.id"], properties["transactional.id"]) + assertEquals(map["apoc.kafka.linger.ms"], properties["linger.ms"]) + assertEquals(map["apoc.kafka.fetch.min.bytes"].toString(), properties["fetch.min.bytes"]) + assertEquals(map["apoc.kafka.topic.discovery.polling.interval"], properties["topic.discovery.polling.interval"]) + assertEquals(map["apoc.kafka.log.compaction.strategy"], properties["log.compaction.strategy"]) + } +} \ No newline at end of file diff --git a/extended/src/test/kotlin/apoc/nlp/aws/AWSProceduresAPITest.kt b/extended/src/test/kotlin/apoc/nlp/aws/AWSProceduresAPITest.kt deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/extra-dependencies/kafka/build.gradle b/extra-dependencies/kafka/build.gradle new file mode 100644 index 0000000000..65354109c4 --- /dev/null +++ b/extra-dependencies/kafka/build.gradle @@ -0,0 +1,31 @@ +plugins { + id 'com.github.johnrengelman.shadow' version '7.1.0' +} + +java { + sourceCompatibility = JavaVersion.VERSION_21 + targetCompatibility = JavaVersion.VERSION_21 +} + +archivesBaseName = 'apoc-kafka-dependencies' +description = """APOC Kafka Dependencies""" + +jar { + manifest { + attributes 'Implementation-Version': version + } +} + +def kafkaVersion = 
"2.4.0" +def jacksonVersion = "2.17.2" + +dependencies { + implementation group: 'org.jetbrains.kotlin', name: 'kotlin-stdlib-jdk8', version: '2.1.0', commonExclusions + implementation group: 'io.ktor', name: 'ktor-jackson', version: '1.6.8', commonExclusions + implementation group: 'com.fasterxml.jackson.module', name: 'jackson-module-kotlin', version: jacksonVersion, commonExclusions + implementation group: 'org.jetbrains.kotlinx', name: 'kotlinx-coroutines-core', version: '1.4.2', commonExclusions + + implementation group: 'org.apache.kafka', name: 'kafka-clients', version: kafkaVersion, commonExclusions + + +} diff --git a/extra-dependencies/kafka/gradle/wrapper/gradle-wrapper.jar b/extra-dependencies/kafka/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..e289a2835ae88a608daec1a250312166928004e5 GIT binary patch literal 54212 zcmaI7W3XjgkTrT(b!^+VZQHhOvyN@swr$(CZTqW^?*97Se)qi=aD0)rp{0Dyr30B0LY0Q|jx{^R!d0{?5$!b<$q;xZz%zyNapa0Id{I^zH9pz_!L zhX0SFG{20vh_Ip(jkL&v^yGw;BsI+(v?Mjf^yEx~0^K6x?$P}u^{Dui^c1By6(GcU zuu<}1p$2&?Dsk~)p}}Z>6UIf_Df+#`ode+iWc@n?a0CnhAn~t1*}sRV{%5GLo3Wv@ldS`dp_RU) zW0Go^C*lhHPgNY1yE@b;S}lDT0I)zjy=!Yc5~kwjyBsy9#lo<B-drm>rrDIyfBIgDHmdTOlynaj^YNC~(=kX-xq)OEg=^y(@<7VNN5aU3ByadzwJkakX$q zXreb7ob9Or&(~c~cQ;(e9o*sHZag!bxQ9z2{cg!8un)I!blC@QKn*!3OQHj>XfwDc zdx-j8@h7r(w`XuXh{L99e`e}lPmC`IQC9~eI^PLux{-!c);?=$dsPKrF=lj4pBeEx z@eE;)Q@zE9S#PC(bx|Ea92+OvGe_Ero3U?Z;NYBJVW3}QW1-=qpJU2GLl=7l2a6I5 zy~~uBEKO&o=bTGv7H8*h;OUFE#L;S4Y;zPJOQZ)bQ~aqGJi~z%U}khSdo2xVYY$K3 z@i6lmx#m7Ni}L}m81_&+INR&X%hnKrE%_xwlPbc`NUcpNp=O?;Q~#)CI=)5vfJvz! z`iQl*VZmf2c#7r++8#xv-rOiVV+mZ820n$QLb|#vmJ=uM zIHIIzy1r)AgWZLsSU&(LwZx|3D>rko42;0CqIQH^PCY^-=2W?s0K#p`sL^-FrYC)Y zbo$)kXl~rM2vJ^!y&RD!hDiJio!%LI!a&ms)P3q43;p~Ek_>~GQL!x@LevGCEclk- zD8H;s9nd^7m7OD&anWi#;g>$QY*RxflWn(L{pA%fK9yW<3Dblnnz}HjvMLom z{D<#7ej)hISQug*VoP!yt^#d}GR?`v1p`#Xr6S}Pg=b-UvPn25MCmco+uC74K;*2o z7`U~o0-63$Andm_MDGexJBH?EDZL;MZSgJp3ZHT4l3Sr&!7xM>;IFcFCCM(kALOtAUW#Sp=ma%R#3f%{dwro1AU zCc19_`;Rump?`}A@u0<_b^QQ-i%NUCKU24K`B!+lJMA4^<*u<-!MB#ZTWMm;Bl=Vo z9k}>Nu^A{Ahxo7%t1XpHvtGAAF}qpZp_*Tj~_{P^v%fZb%{N1^E(9Qz?0CG$sTD-jB~~s@@KSa&u`+Lc`N0Q$-2H0q{;ooDKC4E zBE4C|vnhPp4MT2Uxm(ds@<3k7S4dJ}6hr(^<-VQU7r5`d-JI8yKtW&;B_glKNE>NU z+&Po030joKNS-pwwbJYt=QERZIi1QojO6So&2x2Guk_7ouG6)x-47wyW-{^F0=5E;Z|~j>_N&e(TkSZ3B3B#ou6iMbKF8WMmrN6(T zva~Soo(9--kEZd}))I5QO*UeMn`W|9$?&6pl?;ssc!psBCss!2PFoXm)7p}%7GJWl2PkmOeL@kUg)JZ0&HXf8+DA{dvFdzcFPoRI$WnXUi_;5V z`mb?wK1iJ20HLn%QVuJ^_t+2}VW*T39YLp-knWJv0UQtRIc^*eLW0d)bL>4FYLoMI zCR+S0?^Dt-!2EW3S;|~v!1+_4bCH8MVPg;!I4tUd?#S89KbVDcD4T&uQQ_WTHHfp& zXbyn50%EuEckY2XBj=z@ks^n^l4@M-WZB&iMUliSYU-P^qJ$`OXrz%K>$7`vNlu#p zywS}xXLw_vW~MYcB7}R?#GS^fwOrYq{$gDApwi$B`#{sA@v3zMK51;mOf!Z>Y9cCk zOfgHwjgtjS+nRRchI2d=2ebFERGYka(bEry^ja!#)Ci#F}!+=Fc~)t?x(2Dndd%89v=OzkFdUNwKYlBrqrDum`)? 
\(.*\)$'`
+    if expr "$link" : '/.*' > /dev/null; then
+        PRG="$link"
+    else
+        PRG=`dirname "$PRG"`"/$link"
+    fi
+done
+SAVED="`pwd`"
+cd "`dirname \"$PRG\"`/" >/dev/null
+APP_HOME="`pwd -P`"
+cd "$SAVED" >/dev/null
+
+APP_NAME="Gradle"
+APP_BASE_NAME=`basename "$0"`
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD="maximum"
+
+warn () {
+    echo "$*"
+}
+
+die () {
+    echo
+    echo "$*"
+    echo
+    exit 1
+}
+
+# OS specific support (must be 'true' or 'false').
+cygwin=false
+msys=false
+darwin=false
+nonstop=false
+case "`uname`" in
+  CYGWIN* )
+    cygwin=true
+    ;;
+  Darwin* )
+    darwin=true
+    ;;
+  MINGW* )
+    msys=true
+    ;;
+  NONSTOP* )
+    nonstop=true
+    ;;
+esac
+
+CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+
+# Determine the Java command to use to start the JVM.
+if [ -n "$JAVA_HOME" ] ; then
+    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+        # IBM's JDK on AIX uses strange locations for the executables
+        JAVACMD="$JAVA_HOME/jre/sh/java"
+    else
+        JAVACMD="$JAVA_HOME/bin/java"
+    fi
+    if [ ! -x "$JAVACMD" ] ; then
+        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+    fi
+else
+    JAVACMD="java"
+    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+fi
+
+# Increase the maximum file descriptors if we can.
+if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
+    MAX_FD_LIMIT=`ulimit -H -n`
+    if [ $? -eq 0 ] ; then
+        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
+            MAX_FD="$MAX_FD_LIMIT"
+        fi
+        ulimit -n $MAX_FD
+        if [ $? -ne 0 ] ; then
+            warn "Could not set maximum file descriptor limit: $MAX_FD"
+        fi
+    else
+        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
+    fi
+fi
+
+# For Darwin, add options to specify how the application appears in the dock
+if $darwin; then
+    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
+fi
+
+# For Cygwin or MSYS, switch paths to Windows format before running java
+if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
+    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
+    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
+    JAVACMD=`cygpath --unix "$JAVACMD"`
+
+    # We build the pattern for arguments to be converted via cygpath
+    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
+    SEP=""
+    for dir in $ROOTDIRSRAW ; do
+        ROOTDIRS="$ROOTDIRS$SEP$dir"
+        SEP="|"
+    done
+    OURCYGPATTERN="(^($ROOTDIRS))"
+    # Add a user-defined pattern to the cygpath arguments
+    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
+        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
+    fi
+    # Now convert the arguments - kludge to limit ourselves to /bin/sh
+    i=0
+    for arg in "$@" ; do
+        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
+        CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
+
+        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
+            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
+        else
+            eval `echo args$i`="\"$arg\""
+        fi
+        i=`expr $i + 1`
+    done
+    case $i in
+        0) set -- ;;
+        1) set -- "$args0" ;;
+        2) set -- "$args0" "$args1" ;;
+        3) set -- "$args0" "$args1" "$args2" ;;
+        4) set -- "$args0" "$args1" "$args2" "$args3" ;;
+        5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
+        6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
+        7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
+        8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
+        9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
+    esac
+fi
+
+# Escape application args
+save () {
+    for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
+    echo " "
+}
+APP_ARGS=`save "$@"`
+
+# Collect all arguments for the java command, following the shell quoting and substitution rules
+eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
+
+exec "$JAVACMD" "$@"
diff --git a/extra-dependencies/kafka/gradlew.bat b/extra-dependencies/kafka/gradlew.bat
new file mode 100644
index 0000000000..9618d8d960
--- /dev/null
+++ b/extra-dependencies/kafka/gradlew.bat
@@ -0,0 +1,100 @@
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
+@if "%DEBUG%" == "" @echo off
+@rem ##########################################################################
+@rem
+@rem Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%" == "" set DIRNAME=.
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if "%ERRORLEVEL%" == "0" goto init
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto init
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:init
+@rem Get command-line arguments, handling Windows variants
+
+if not "%OS%" == "Windows_NT" goto win9xME_args
+
+:win9xME_args
+@rem Slurp the command line arguments.
+set CMD_LINE_ARGS=
+set _SKIP=2
+
+:win9xME_args_slurp
+if "x%~1" == "x" goto execute
+
+set CMD_LINE_ARGS=%*
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
+
+:end
+@rem End local scope for the variables with windows NT shell
+if "%ERRORLEVEL%"=="0" goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
+exit /b 1
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega
diff --git a/extra-dependencies/settings.gradle b/extra-dependencies/settings.gradle
index 18c0c39e72..e3a4ea2296 100644
--- a/extra-dependencies/settings.gradle
+++ b/extra-dependencies/settings.gradle
@@ -9,3 +9,4 @@ include('selenium')
 include('hadoop')
 include('gcs')
 include('aws')
+include('kafka')