From 669a7ab528f716031a98281078275bc525502e4f Mon Sep 17 00:00:00 2001 From: VictorCavichioli Date: Sun, 1 Sep 2024 17:15:17 -0300 Subject: [PATCH 1/2] Introduce cassandra-test-image and Update Docs --- CHANGES.md | 4 +- cassandra-test-image/pom.xml | 99 +++++++++++ .../src/main/docker/Dockerfile | 11 ++ .../cassandra-rackdc-dc1-rack1.properties | 2 + .../cassandra-rackdc-dc2-rack1.properties | 2 + .../src/main/docker/create_keyspaces.cql | 17 ++ .../src/main/docker/docker-compose.yml | 93 ++++++++++ .../src/main/docker/ecc-entrypoint.sh | 88 ++++++++++ .../src/main/docker/setup_db.sh | 18 ++ .../src/main/docker/users.cql | 5 + .../main/resources/generate_certificates.sh | 160 ++++++++++++++++++ .../AbstractCassandraCluster.java | 66 ++++++++ .../TestCassandraCluster.java | 37 ++++ docs/ARCHITECTURE.md | 130 ++++++++++---- docs/TESTS.md | 26 ++- pom.xml | 2 + 16 files changed, 722 insertions(+), 38 deletions(-) create mode 100644 cassandra-test-image/pom.xml create mode 100644 cassandra-test-image/src/main/docker/Dockerfile create mode 100755 cassandra-test-image/src/main/docker/cassandra-rackdc-dc1-rack1.properties create mode 100755 cassandra-test-image/src/main/docker/cassandra-rackdc-dc2-rack1.properties create mode 100644 cassandra-test-image/src/main/docker/create_keyspaces.cql create mode 100755 cassandra-test-image/src/main/docker/docker-compose.yml create mode 100755 cassandra-test-image/src/main/docker/ecc-entrypoint.sh create mode 100755 cassandra-test-image/src/main/docker/setup_db.sh create mode 100644 cassandra-test-image/src/main/docker/users.cql create mode 100755 cassandra-test-image/src/main/resources/generate_certificates.sh create mode 100644 cassandra-test-image/src/test/java/cassandracluster/AbstractCassandraCluster.java create mode 100644 cassandra-test-image/src/test/java/cassandracluster/TestCassandraCluster.java diff --git a/CHANGES.md b/CHANGES.md index 09ecd754a..7bf2577e6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,7 +2,9 @@ ## 
Version 1.0.0 (Not yet Release) -* Investigate Introduction of testContainers Issue #682 +* Update Architecture and Tests Documentations to Add the Agent Features and The cassandra-test-image - Issue #707 +* Enhance Test Infrastructure by Adding Cassandra-Test-Image Module With Multi-Datacenter Cluster and Abstract Integration Test Class - Issue #706 +* Investigate Introduction of testContainers - Issue #682 * Create EccNodesSync Object to Represent Table nodes_sync - Issue #672 * Expose AgentJMXConnectionProvider on Connection and Application Module - Issue #676 * Create JMXAgentConfig to add Hosts in JMX Session Through ecc.yml - Issue #675 diff --git a/cassandra-test-image/pom.xml b/cassandra-test-image/pom.xml new file mode 100644 index 000000000..c28e36af3 --- /dev/null +++ b/cassandra-test-image/pom.xml @@ -0,0 +1,99 @@ + + + + 4.0.0 + + com.ericsson.bss.cassandra.ecchronos + agent + 1.0.0-SNAPSHOT + + cassandra-test-image + Test image for integration tests using Apache Cassandra + EcChronos Cassandra Test Image + + + + + com.datastax.oss + java-driver-core + + + + + org.junit.vintage + junit-vintage-engine + test + + + + org.awaitility + awaitility + test + + + + org.assertj + assertj-core + test + + + + org.testcontainers + cassandra + test + + + + + + maven-deploy-plugin + + true + + + + + maven-install-plugin + + true + + + + + + + + build-cassandra-test-jar + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + + + + diff --git a/cassandra-test-image/src/main/docker/Dockerfile b/cassandra-test-image/src/main/docker/Dockerfile new file mode 100644 index 000000000..9e75e92a2 --- /dev/null +++ b/cassandra-test-image/src/main/docker/Dockerfile @@ -0,0 +1,11 @@ +FROM cassandra:4.1.5 + +COPY ecc-entrypoint.sh /usr/local/bin/ +RUN ln -s usr/local/bin/ecc-entrypoint.sh /ecc-entrypoint.sh + +COPY create_keyspaces.cql /etc/cassandra/ +COPY users.cql /etc/cassandra/ +COPY setup_db.sh /etc/cassandra/ + +ENTRYPOINT ["ecc-entrypoint.sh"] 
+EXPOSE 7000 7001 7199 9042 diff --git a/cassandra-test-image/src/main/docker/cassandra-rackdc-dc1-rack1.properties b/cassandra-test-image/src/main/docker/cassandra-rackdc-dc1-rack1.properties new file mode 100755 index 000000000..8160fc9f5 --- /dev/null +++ b/cassandra-test-image/src/main/docker/cassandra-rackdc-dc1-rack1.properties @@ -0,0 +1,2 @@ +dc= datacenter1 +rack= rack1 diff --git a/cassandra-test-image/src/main/docker/cassandra-rackdc-dc2-rack1.properties b/cassandra-test-image/src/main/docker/cassandra-rackdc-dc2-rack1.properties new file mode 100755 index 000000000..804726b34 --- /dev/null +++ b/cassandra-test-image/src/main/docker/cassandra-rackdc-dc2-rack1.properties @@ -0,0 +1,2 @@ +dc= datacenter2 +rack= rack1 diff --git a/cassandra-test-image/src/main/docker/create_keyspaces.cql b/cassandra-test-image/src/main/docker/create_keyspaces.cql new file mode 100644 index 000000000..682d640d6 --- /dev/null +++ b/cassandra-test-image/src/main/docker/create_keyspaces.cql @@ -0,0 +1,17 @@ +CREATE KEYSPACE IF NOT EXISTS ecchronos WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 1, 'datacenter2': 1}; +CREATE TYPE IF NOT EXISTS ecchronos.token_range (start text, end text); +CREATE TYPE IF NOT EXISTS ecchronos.table_reference (id uuid, keyspace_name text, table_name text); +CREATE TABLE IF NOT EXISTS ecchronos.nodes_sync(ecchronos_id TEXT, datacenter_name TEXT, node_id UUID, node_endpoint TEXT, node_status TEXT, last_connection TIMESTAMP, next_connection TIMESTAMP, PRIMARY KEY(ecchronos_id, datacenter_name, node_id)) WITH CLUSTERING ORDER BY(datacenter_name DESC, node_id DESC); +CREATE TABLE IF NOT EXISTS ecchronos.on_demand_repair_status (host_id uuid, job_id uuid, table_reference frozen, token_map_hash int, repaired_tokens frozen>>, status text, completed_time timestamp, repair_type text, PRIMARY KEY(host_id, job_id)) WITH default_time_to_live = 2592000 AND gc_grace_seconds = 0; +CREATE TABLE IF NOT EXISTS ecchronos.lock (resource text, 
node uuid, metadata map, PRIMARY KEY(resource)) WITH default_time_to_live = 600 AND gc_grace_seconds = 0; +CREATE TABLE IF NOT EXISTS ecchronos.lock_priority (resource text, node uuid, priority int, PRIMARY KEY(resource, node)) WITH default_time_to_live = 600 AND gc_grace_seconds = 0; +CREATE TABLE IF NOT EXISTS ecchronos.reject_configuration (keyspace_name text, table_name text, start_hour int, start_minute int, end_hour int, end_minute int, PRIMARY KEY(keyspace_name, table_name, start_hour, start_minute)); +CREATE TABLE IF NOT EXISTS ecchronos.repair_history(table_id uuid, node_id uuid, repair_id timeuuid, job_id uuid, coordinator_id uuid, range_begin text, range_end text, participants set, status text, started_at timestamp, finished_at timestamp, PRIMARY KEY((table_id,node_id), repair_id)) WITH compaction = {'class': 'TimeWindowCompactionStrategy'} AND default_time_to_live = 1728000 AND CLUSTERING ORDER BY (repair_id DESC); +CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2': 1}; +CREATE TABLE IF NOT EXISTS test.table1 (key1 text, key2 int, value int, PRIMARY KEY(key1, key2)); +CREATE TABLE IF NOT EXISTS test.table2 (key1 text, key2 int, value int, PRIMARY KEY(key1, key2)); +CREATE KEYSPACE IF NOT EXISTS test2 WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2': 1}; +CREATE TABLE IF NOT EXISTS test2.table1 (key1 text, key2 int, value int, PRIMARY KEY(key1, key2)); +CREATE TABLE IF NOT EXISTS test2.table2 (key1 text, key2 int, value int, PRIMARY KEY(key1, key2)); +CREATE KEYSPACE IF NOT EXISTS "keyspaceWithCamelCase" WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2': 1}; +CREATE TABLE IF NOT EXISTS "keyspaceWithCamelCase"."tableWithCamelCase" (key1 text, key2 int, value int, PRIMARY KEY(key1, key2)); \ No newline at end of file diff --git a/cassandra-test-image/src/main/docker/docker-compose.yml 
b/cassandra-test-image/src/main/docker/docker-compose.yml new file mode 100755 index 000000000..2fbff44aa --- /dev/null +++ b/cassandra-test-image/src/main/docker/docker-compose.yml @@ -0,0 +1,93 @@ +version: '3.1' +services: + cassandra-seed-dc1-rack1-node1: + build: + context: . + ports: + - "9042:9042" + - "7100:7199" + environment: + - CASSANDRA_CLUSTER_NAME=cassandra-cluster + - CASSANDRA_DC=datacenter1 + - CASSANDRA_RACK=rack1 + - CASSANDRA_ENDPOINT_SNITCH=GossipingPropertyFileSnitch + - CASSANDRA_SEEDS=cassandra-seed-dc1-rack1-node1,cassandra-seed-dc2-rack1-node1,cassandra-node-dc1-rack1-node2,cassandra-node-dc2-rack1-node2 + - CASSANDRA_PASSWORD_SEEDER=yes + - CASSANDRA_PASSWORD=cassandra + - MAX_HEAP_SIZE=2G + - HEAP_NEWSIZE=200M + - LOCAL_JMX=no + - JVM_EXTRA_OPTS=-Dcom.sun.management.jmxremote.authenticate=false -Dcassandra.superuser_setup_delay_ms=0 -Dcassandra.skip_wait_for_gossip_to_settle=0 -Dcassandra.ring_delay_ms=0 + volumes: + - cassandra-seed-dc1-rack1-node1-data:/var/lib/cassandra + - ./cassandra-rackdc-dc1-rack1.properties:/etc/cassandra/cassandra-rackdc.properties + + cassandra-seed-dc2-rack1-node1: + build: + context: . 
+ ports: + - "9043:9042" + - "7200:7199" + environment: + - CASSANDRA_CLUSTER_NAME=cassandra-cluster + - CASSANDRA_DC=datacenter2 + - CASSANDRA_RACK=rack1 + - CASSANDRA_ENDPOINT_SNITCH=GossipingPropertyFileSnitch + - CASSANDRA_SEEDS=cassandra-seed-dc1-rack1-node1,cassandra-seed-dc2-rack1-node1,cassandra-node-dc1-rack1-node2,cassandra-node-dc2-rack1-node2 + - CASSANDRA_PASSWORD_SEEDER=yes + - CASSANDRA_PASSWORD=cassandra + - MAX_HEAP_SIZE=2G + - HEAP_NEWSIZE=200M + - LOCAL_JMX=no + - JVM_EXTRA_OPTS=-Dcom.sun.management.jmxremote.authenticate=false -Dcassandra.superuser_setup_delay_ms=0 -Dcassandra.skip_wait_for_gossip_to_settle=0 -Dcassandra.ring_delay_ms=0 + volumes: + - cassandra-seed-dc2-rack1-node1-data:/var/lib/cassandra + - ./cassandra-rackdc-dc2-rack1.properties:/etc/cassandra/cassandra-rackdc.properties + + cassandra-node-dc1-rack1-node2: + build: + context: . + ports: + - "7300:7199" + environment: + - CASSANDRA_CLUSTER_NAME=cassandra-cluster + - CASSANDRA_DC=datacenter1 + - CASSANDRA_RACK=rack1 + - CASSANDRA_ENDPOINT_SNITCH=GossipingPropertyFileSnitch + - CASSANDRA_SEEDS=cassandra-seed-dc1-rack1-node1,cassandra-seed-dc2-rack1-node1,cassandra-node-dc1-rack1-node2,cassandra-node-dc2-rack1-node2 + - CASSANDRA_PASSWORD_SEEDER=yes + - CASSANDRA_PASSWORD=cassandra + - MAX_HEAP_SIZE=2G + - HEAP_NEWSIZE=200M + - LOCAL_JMX=no + - JVM_EXTRA_OPTS=-Dcom.sun.management.jmxremote.authenticate=false -Dcassandra.superuser_setup_delay_ms=0 -Dcassandra.skip_wait_for_gossip_to_settle=0 -Dcassandra.ring_delay_ms=0 + volumes: + - cassandra-node-dc1-rack1-node2-data:/var/lib/cassandra + - ./cassandra-rackdc-dc1-rack1.properties:/etc/cassandra/cassandra-rackdc.properties + + cassandra-node-dc2-rack1-node2: + build: + context: . 
+ ports: + - "7400:7199" + environment: + - CASSANDRA_CLUSTER_NAME=cassandra-cluster + - CASSANDRA_DC=datacenter2 + - CASSANDRA_RACK=rack1 + - CASSANDRA_ENDPOINT_SNITCH=GossipingPropertyFileSnitch + - CASSANDRA_SEEDS=cassandra-seed-dc1-rack1-node1,cassandra-seed-dc2-rack1-node1,cassandra-node-dc1-rack1-node2,cassandra-node-dc2-rack1-node2 + - CASSANDRA_PASSWORD_SEEDER=yes + - CASSANDRA_PASSWORD=cassandra + - MAX_HEAP_SIZE=2G + - HEAP_NEWSIZE=200M + - LOCAL_JMX=no + - JVM_EXTRA_OPTS=-Dcom.sun.management.jmxremote.authenticate=false -Dcassandra.superuser_setup_delay_ms=0 -Dcassandra.skip_wait_for_gossip_to_settle=0 -Dcassandra.ring_delay_ms=0 + volumes: + - cassandra-node-dc2-rack1-node2-data:/var/lib/cassandra + - ./cassandra-rackdc-dc2-rack1.properties:/etc/cassandra/cassandra-rackdc.properties + +volumes: + cassandra-seed-dc1-rack1-node1-data: + cassandra-seed-dc2-rack1-node1-data: + cassandra-node-dc1-rack1-node2-data: + cassandra-node-dc2-rack1-node2-data: diff --git a/cassandra-test-image/src/main/docker/ecc-entrypoint.sh b/cassandra-test-image/src/main/docker/ecc-entrypoint.sh new file mode 100755 index 000000000..05650b00e --- /dev/null +++ b/cassandra-test-image/src/main/docker/ecc-entrypoint.sh @@ -0,0 +1,88 @@ +#!/bin/bash +# +# Copyright 2020 Telefonaktiebolaget LM Ericsson +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set -e + +sed -i "s/authenticator: .*/authenticator: PasswordAuthenticator/g" "$CASSANDRA_CONF"/cassandra.yaml +# Start of for 5.X +sed -i "/^authenticator:/{n;s/class_name : .*/class_name : PasswordAuthenticator/}" "$CASSANDRA_CONF"/cassandra.yaml +# End of for 5.X +sed -i "s/^authorizer: .*/authorizer: CassandraAuthorizer/g" "$CASSANDRA_CONF"/cassandra.yaml + +sed -i "s/num_tokens: .*/num_tokens: 16/g" "$CASSANDRA_CONF"/cassandra.yaml +sed -i "s/auto_snapshot: .*/auto_snapshot: false/g" "$CASSANDRA_CONF"/cassandra.yaml + +mkdir -p ~/.cassandra + +cat < ~/.cassandra/cqlshrc +[authentication] +username = cassandra +password = cassandra + +EOF + +if [ -f /etc/certificates/.keystore ]; then +# +# Setup CQL certificates +# + sed -i "/client_encryption_options:/{n;s/enabled: false/enabled: true/}" "$CASSANDRA_CONF"/cassandra.yaml + + sed -i "s;keystore: .*;keystore: /etc/certificates/.keystore;g" "$CASSANDRA_CONF"/cassandra.yaml + sed -i "s/keystore_password: .*/keystore_password: ecctest/g" "$CASSANDRA_CONF"/cassandra.yaml + + sed -ri "s;(# )?truststore: .*;truststore: /etc/certificates/.truststore;g" "$CASSANDRA_CONF"/cassandra.yaml + sed -ri "s/(# )?truststore_password: .*/truststore_password: ecctest/g" "$CASSANDRA_CONF"/cassandra.yaml + + sed -ri "s/(# )?require_client_auth: false/require_client_auth: true/g" "$CASSANDRA_CONF"/cassandra.yaml + sed -ri "s/(# )?protocol: TLS/protocol: TLSv1.2/g" "$CASSANDRA_CONF"/cassandra.yaml + + cat <> ~/.cassandra/cqlshrc +[connection] +hostname = localhost +port = 9042 +ssl = true + +[ssl] +certfile = /etc/certificates/ca.crt +validate = true +userkey = /etc/certificates/key.pem +usercert = /etc/certificates/cert.crt +version = TLSv1_2 +EOF + +# +# Setup JMX certificates +# + +# Comment rmi port to choose randomly + sed -ri "s/(.*jmxremote.rmi.port.*)/#\1/g" "$CASSANDRA_CONF"/cassandra-env.sh + +# Enable secure transport + sed -ri "s/#(.*jmxremote.ssl=true)/\1/g" "$CASSANDRA_CONF"/cassandra-env.sh + sed -ri 
"s/#(.*jmxremote.ssl.need_client_auth=true)/\1/g" "$CASSANDRA_CONF"/cassandra-env.sh + +# Set protocol + sed -ri 's/#(.*jmxremote.ssl.enabled.protocols)=.*/\1=TLSv1.2"/g' "$CASSANDRA_CONF"/cassandra-env.sh + +# Set keystore/truststore properties + sed -ri 's;#(.*keyStore)=.*;\1=/etc/certificates/.keystore";g' "$CASSANDRA_CONF"/cassandra-env.sh + sed -ri 's;#(.*trustStore)=.*;\1=/etc/certificates/.truststore";g' "$CASSANDRA_CONF"/cassandra-env.sh + sed -ri 's/#(.*keyStorePassword)=.*/\1=ecctest"/g' "$CASSANDRA_CONF"/cassandra-env.sh + sed -ri 's/#(.*trustStorePassword)=.*/\1=ecctest"/g' "$CASSANDRA_CONF"/cassandra-env.sh + +fi + +docker-entrypoint.sh diff --git a/cassandra-test-image/src/main/docker/setup_db.sh b/cassandra-test-image/src/main/docker/setup_db.sh new file mode 100755 index 000000000..d80f33d34 --- /dev/null +++ b/cassandra-test-image/src/main/docker/setup_db.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# +# Copyright 2020 Telefonaktiebolaget LM Ericsson +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +cqlsh -u cassandra -p cassandra -f /etc/cassandra/create_keyspaces.cql +cqlsh -u cassandra -p cassandra -f /etc/cassandra/users.cql diff --git a/cassandra-test-image/src/main/docker/users.cql b/cassandra-test-image/src/main/docker/users.cql new file mode 100644 index 000000000..935af162c --- /dev/null +++ b/cassandra-test-image/src/main/docker/users.cql @@ -0,0 +1,5 @@ +CREATE ROLE IF NOT EXISTS eccuser WITH PASSWORD = 'eccpassword' AND LOGIN = true; + +GRANT SELECT ON KEYSPACE ecchronos TO eccuser; +GRANT MODIFY ON KEYSPACE ecchronos TO eccuser; +GRANT SELECT ON system_distributed.repair_history TO eccuser; diff --git a/cassandra-test-image/src/main/resources/generate_certificates.sh b/cassandra-test-image/src/main/resources/generate_certificates.sh new file mode 100755 index 000000000..c8cca5173 --- /dev/null +++ b/cassandra-test-image/src/main/resources/generate_certificates.sh @@ -0,0 +1,160 @@ +#!/bin/bash +# +# Copyright 2020 Telefonaktiebolaget LM Ericsson +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# +# Output: +# Certs for CQL +# (ca/key.pem - CA private key) +# cert/ca.crt - CA certificate +# cert/.truststore - JKS (CA) truststore +# +# cert/key.pem - User private key +# cert/cert.crt - User certificate +# (cert/cert.csr - User certificate sign request) +# (cert/cert.pkcs12 - User cert in PKCS#12 format) +# cert/.keystore - JKS (user) keystore +# +# Certs for REST +# cert/serverca.crt - REST CA +# cert/servercakey.pem - REST CA private key +# cert/server.csr - REST server CSR +# cert/servercert.crt - REST server cert +# cert/serverkey.pem - REST server private key +# cert/serverkeystore - REST server keystore (PKCS12) +# cert/servertruststore - REST server truststore (PKCS12) +# +# Certs for ecctool +# cert/clientcert.crt - ecctool certificate +# cert/clientkey.pem - ecctool private key +# cert/client.csr +# + +mkdir -p ca +mkdir -p cert + +CA_KEY="ca/key.pem" +CA_CERT="cert/ca.crt" +TRUSTSTORE="cert/.truststore" + +USER_KEY="cert/key.pem" +USER_CERT="cert/cert.crt" +USER_CSR="cert/cert.csr" +USER_PKCS12="cert/cert.pkcs12" +KEYSTORE="cert/.keystore" + +############### +# Generate CA # +############### + +## Generate self-signed CA +openssl req -x509 -newkey rsa:2048 -nodes -days 1 -subj "/C=TE/ST=TEST/L=TEST/O=TEST/OU=TEST/CN=CA"\ + -keyout "$CA_KEY" -out "$CA_CERT" + +## Import self-signed CA certificate to truststore +keytool -importcert -noprompt\ + -file "$CA_CERT"\ + -keystore "$TRUSTSTORE" -storepass "ecctest" + + +######################## +# Generate certificate # +######################## + +## Generate user key and CSR +openssl req -newkey rsa:2048 -nodes -subj "/C=TE/ST=TEST/L=TEST/O=TEST/OU=TEST/CN=User"\ + -keyout "$USER_KEY" -out "$USER_CSR" + +# Sign user certificate +openssl x509 -req -sha256 -days 1\ + -in "$USER_CSR" -out "$USER_CERT"\ + -CA "$CA_CERT" -CAkey "$CA_KEY" -CAcreateserial + +## Convert key/certificate to PKCS12 format +openssl pkcs12 -export\ + -in "$USER_CERT" -inkey "$USER_KEY"\ + -out "$USER_PKCS12" -passout 
pass:"ecctest" + +## Import PKCS12 key to keystore +keytool -importkeystore\ + -srckeystore "$USER_PKCS12" -srcalias "1" -srcstorepass "ecctest"\ + -destkeystore "$KEYSTORE" -destalias "cert" -deststorepass "ecctest" + +SERVER_CA="cert/serverca.crt" +SERVER_CA_KEY="cert/servercakey.pem" +SERVER_CSR="cert/server.csr" +SERVER_CERT="cert/servercert.crt" +SERVER_CERT_KEY="cert/serverkey.pem" +SERVER_KEYSTORE="cert/serverkeystore" +SERVER_TRUSTSTORE="cert/servertruststore" + +ECCTOOL_CLIENT_CA="cert/clientca.crt" +ECCTOOL_CLIENT_CA_KEY="cert/clientcakey.pem" +ECCTOOL_CLIENT_CERT="cert/clientcert.crt" +ECCTOOL_CLIENT_CERT_KEY="cert/clientkey.pem" +ECCTOOL_CLIENT_CSR="cert/client.csr" + +###################### +# Generate Server CA # +###################### + +## Generate self-signed CA (this is the CA that the client should trust ad that we should issue server certificates from) +openssl req -x509 -newkey rsa:2048 -nodes -days 1 -subj "/C=TE/ST=TEST/L=TEST/O=TEST/OU=TEST/CN=RESTSERVERCA"\ + -keyout "$SERVER_CA_KEY" -out "$SERVER_CA" + +############################### +# Generate server certificate # +############################### + +## Generate server key and CSR +openssl req -newkey rsa:2048 -nodes -subj "/C=TE/ST=TEST/L=TEST/O=TEST/OU=TEST/CN=localhost"\ + -keyout "$SERVER_CERT_KEY" -out "$SERVER_CSR" + +# Sign server certificate +openssl x509 -req -sha256 -days 1\ + -in "$SERVER_CSR" -out "$SERVER_CERT"\ + -extfile <(printf "subjectAltName=DNS:localhost")\ + -CA "$SERVER_CA" -CAkey "$SERVER_CA_KEY" -CAcreateserial + +## Convert server key/certificate to PKCS12 format +openssl pkcs12 -export\ + -in "$SERVER_CERT" -inkey "$SERVER_CERT_KEY"\ + -out "$SERVER_KEYSTORE" -passout pass:"ecctest" + +############################## +# Generate ecctool client CA # +############################## + +## Generate self-signed client CA (this is the CA that the server should trust and that we should issue client certificates from) +openssl req -x509 -newkey rsa:2048 -nodes -days 1 
-subj "/C=TE/ST=TEST/L=TEST/O=TEST/OU=TEST/CN=RESTCLIENTCA"\ + -keyout "$ECCTOOL_CLIENT_CA_KEY" -out "$ECCTOOL_CLIENT_CA" + +## Import self-signed client CA certificate to server truststore +keytool -importcert -noprompt\ + -file "$ECCTOOL_CLIENT_CA"\ + -keystore "$SERVER_TRUSTSTORE" -storepass "ecctest" + +####################################### +# Generate ecctool client certificate # +####################################### + +## Generate ecctool key and CSR +openssl req -newkey rsa:2048 -nodes -subj "/C=TE/ST=TEST/L=TEST/O=TEST/OU=TEST/CN=ecctool"\ + -keyout "$ECCTOOL_CLIENT_CERT_KEY" -out "$ECCTOOL_CLIENT_CSR" + +# Sign ecctool certificate +openssl x509 -req -sha256 -days 1\ + -in "$ECCTOOL_CLIENT_CSR" -out "$ECCTOOL_CLIENT_CERT"\ + -CA "$ECCTOOL_CLIENT_CA" -CAkey "$ECCTOOL_CLIENT_CA_KEY" -CAcreateserial diff --git a/cassandra-test-image/src/test/java/cassandracluster/AbstractCassandraCluster.java b/cassandra-test-image/src/test/java/cassandracluster/AbstractCassandraCluster.java new file mode 100644 index 000000000..5644c438d --- /dev/null +++ b/cassandra-test-image/src/test/java/cassandracluster/AbstractCassandraCluster.java @@ -0,0 +1,66 @@ +/* + * Copyright 2024 Telefonaktiebolaget LM Ericsson + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package cassandracluster; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.CqlSessionBuilder; +import java.net.InetSocketAddress; +import java.nio.file.Path; +import java.nio.file.Paths; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.testcontainers.containers.DockerComposeContainer; + +public class AbstractCassandraCluster +{ + private static DockerComposeContainer composeContainer; + protected static String containerIP; + protected static CqlSession mySession; + + @BeforeClass + public static void setup() throws InterruptedException + { + Path dockerComposePath = Paths.get("") + .toAbsolutePath() + .getParent() + .resolve("cassandra-test-image/src/main/docker/docker-compose.yml"); + System.out.println("DockerComposePath que veio foi" + dockerComposePath.toFile()); + composeContainer = new DockerComposeContainer<>(dockerComposePath.toFile()); + composeContainer.start(); + + System.out.println("Waiting Cassandra Cluster finish to start"); + Thread.sleep(50000); + + containerIP = composeContainer.getContainerByServiceName("cassandra-seed-dc1-rack1-node1").get() + .getContainerInfo() + .getNetworkSettings().getNetworks().values().stream().findFirst().get().getIpAddress(); + CqlSessionBuilder builder = CqlSession.builder() + .addContactPoint(new InetSocketAddress(containerIP, 9042)) + .withLocalDatacenter("datacenter1") + .withAuthCredentials("cassandra", "cassandra"); + mySession = builder.build(); + } + + @AfterClass + public static void tearDownCluster() + { + if (mySession != null) + { + mySession.close(); + } + composeContainer.stop(); + } +} + diff --git a/cassandra-test-image/src/test/java/cassandracluster/TestCassandraCluster.java b/cassandra-test-image/src/test/java/cassandracluster/TestCassandraCluster.java new file mode 100644 index 000000000..6c78224ab --- /dev/null +++ b/cassandra-test-image/src/test/java/cassandracluster/TestCassandraCluster.java @@ -0,0 +1,37 @@ +/* + * 
Copyright 2024 Telefonaktiebolaget LM Ericsson + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package cassandracluster; + + import com.datastax.oss.driver.api.core.cql.ResultSet; + import org.junit.Test; + + + import static org.junit.Assert.assertTrue; + + public class TestCassandraCluster extends AbstractCassandraCluster + { + @Test + public void testCassandraCluster() + { + mySession.execute( + "CREATE KEYSPACE IF NOT EXISTS test_keyspace WITH replication = {'class': 'NetworkTopologyStrategy', " + + "'datacenter1': 2}"); + mySession.execute("CREATE TABLE IF NOT EXISTS test_keyspace.test_table (id UUID PRIMARY KEY, value text)"); + ResultSet resultSet = mySession.execute("INSERT INTO test_keyspace.test_table (id, value) VALUES (uuid(), 'Test " + + "value')"); + + assertTrue(resultSet.wasApplied()); + } + } diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index a5d6a4145..8b57cc93d 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -1,54 +1,114 @@ # Architecture ## Summary + - [Overview](#overview) - [Concepts](#concepts) - - [Leases](#leases) - - [Scheduling flow](#scheduling-flow) - - [Scheduled jobs](#scheduled-jobs) - - [Run policies](#run-policies) - - [Repair scheduling](#repair-scheduling) - - [Vnode repairs](#vnode-repairs) + - [Leases](#leases) + - [Scheduling flow](#scheduling-flow) + - [Scheduled jobs](#scheduled-jobs) + - [Run policies](#run-policies) + - [Repair scheduling](#repair-scheduling) + - [Vnode 
repairs](#vnode-repairs) - [Sub-range repairs](#sub-range-repairs) - - [Example](#example) - - [Repair history](#repair-history) + - [Example](#example) + - [Repair history](#repair-history) - [Incremental repairs](#incremental-repairs) - [References](#references) ## Overview -ecChronos is built to be continuously repairing data in the background. Each ecChronos instance keeps track of the repairs -of a single cassandra node. A lock table in cassandra makes sure that only a subset of repairs run at any one time. Repairs -can be configured to run only during certain time periods or not at all. Settings for backpressure are provided to -make sure repair is spread out over the interval time while alarms are provided to signal when a job has not run for -longer than expected. + +ecChronos is built to be continuously repairing data in the background. Each ecChronos instance keeps track of the repairs of a group of specified Cassandra nodes, based on the agent type.
```mermaid graph TB - c[ecChronos Instance]---C((Cassandra Node)); - - a[ecChronos Instance]---A((Cassandra Node)); - - A((Cassandra Node))===B((Cassandra Node)); - - A((Cassandra Node))===C((Cassandra Node)); - - C((Cassandra Node))===D((Cassandra Node)) - - b[ecChronos Instance]---B((Cassandra Node)) - - B((Cassandra Node))===D((Cassandra Node)) - - D((Cassandra Node))---d[ecChronos Instance] + a[datacenter1]---A((Cassandra Node)); + a[datacenter1]---B((Cassandra Node)); + b[datacenter2]---C((Cassandra Node)); + b[datacenter2]---D((Cassandra Node)); + A((Cassandra Node))---d[ecChronos Agent]; + B((Cassandra Node))---d[ecChronos Agent]; + C((Cassandra Node))---d[ecChronos Agent]; + D((Cassandra Node))---d[ecChronos Agent]; ```
Figure 1: ecChronos and Cassandra Nodes.
## Concepts +### Connection + +Once ecChronos establishes its initial connection with the `contactPoints`, it must register its control over the nodes based on the `type` property, whether JMX or CQL, to make it clear that a single instance will be responsible for managing multiple nodes. Then it would be possible to keep track of what was the last time that the EcChronos instance was able to connect with a node, and also for other EcChronos instances to keep track of each other's health. + +If type is `datacenterAware`, ecChronos will register its control over all the nodes in the specified datacenter; The `rackAware` declares that ecChronos is responsible just for a sub-set of racks in the declared list; The `hostAware` functionality declares that ecChronos is responsible just for the specified hosts list. When connection.cql.agent.enabled is true, it must use the AgentNativeConnectionProvider class as default provider. + +Configuration is available on ecc.yml in the below format. + +```yaml +connection: + cql: + agent: + type: datacenterAware + contactPoints: + - host: 127.0.0.1 + port: 9042 + - host: 127.0.0.2 + port: 9042 + datacenterAware: + datacenters: + - name: datacenter1 + - name: datacenter2 + rackAware: + racks: + - datacenterName: datacenter1 + rackName: rack1 + - datacenterName: datacenter1 + rackName: rack2 + hostAware: + hosts: + - host: 127.0.0.1 + port: 9042 + - host: 127.0.0.2 + port: 9042 + - host: 127.0.0.3 + port: 9042 + - host: 127.0.0.4 + port: 9042 + provider: com.ericsson.bss.cassandra.ecchronos.application.AgentNativeConnectionProvider +``` + +### Nodes Sync + +To keep track of nodes and instances, the Agent implementation uses the table nodes_sync, which declares the nodes managed by specific ecChronos instances, so to use the Agent it is mandatory to create the table below: + +```cql +CREATE TABLE ecchronos.nodes_sync +( + ecchronos_id TEXT, + datacenter_name TEXT, + node_id UUID, + node_endpoint TEXT, + node_status TEXT, + last_connection TIMESTAMP, +
next_connection TIMESTAMP, + PRIMARY KEY + ( + ecchronos_id, + datacenter_name, + node_id + ) +) WITH CLUSTERING ORDER BY( + datacenter_name DESC, + node_id DESC +); +``` + ### Leases +A lock table in cassandra makes sure that only a subset of repairs run at any one time. Repairs can be configured to run only during certain time periods or not at all. Settings for backpressure are provided to make sure repair is spread out over the interval time while alarms are provided to signal when a job has not run for longer than expected. +
```mermaid @@ -175,10 +235,11 @@ This is activated by specifying a target repair size in the [RepairConfiguration For the standalone version the option is called `repair.size.target` in the configuration file. Each sub-range repair session will aim to handle the target amount of data. -*Note: Without this option specified the repair mechanism will handle full virtual nodes only (including how it interprets the repair history)* -*Note: The target repair size is assuming a uniform data distribution across partitions on the local node* +_Note: Without this option specified the repair mechanism will handle full virtual nodes only (including how it interprets the repair history)_ +_Note: The target repair size is assuming a uniform data distribution across partitions on the local node_ + +### Example -### Example With a local table containing 100 bytes of data and a total of 100 tokens locally in a continuous range (0, 100]. When the repair target is set to 2 bytes the range will be divided into 50 sub ranges, each handling two tokens. The sub ranges would be: @@ -204,9 +265,9 @@ In a larger context this also works for repairs covering the full virtual node. Given a virtual node (0, 30] that was repaired at timestamp X and a repair history entry containing the sub range (15, 20] repaired at Y. Assuming that X is more than one hour before Y this will produce three sub ranges in the internal representation: -* (0, 15] repaired at X. -* (15, 20] repaired at Y -* (20, 30] repaired at X +- (0, 15] repaired at X.
+- (15, 20] repaired at Y +- (20, 30] repaired at X ## Incremental repairs @@ -225,6 +286,7 @@ The IncrementalRepairTask is the class that will perform the incremental repair [i96]: https://github.com/Ericsson/ecchronos/issues/96 ## References + [1\]: [Consensus on Cassandra](https://www.datastax.com/blog/consensus-cassandra); [2\]: [Incremental and Full Repairs](https://cassandra.apache.org/doc/latest/cassandra/operating/repair.html#incremental-and-full-repairs) diff --git a/docs/TESTS.md b/docs/TESTS.md index a9748df37..0848ed623 100644 --- a/docs/TESTS.md +++ b/docs/TESTS.md @@ -24,9 +24,29 @@ They are running simple tests that sometimes utilize a single embedded Cassandra ### Docker tests -The acceptance tests and integration tests use docker instances by default. -The docker containers gets started when the `-P docker-integration-test` flag is used in maven. -The docker command must be runnable without *sudo* for the user running the tests. +The current test setup employs Testcontainers to create a simplified Cassandra instance for unit testing and a full Cassandra cluster for integration and manual tests. To conduct manual tests, you can utilize the template provided in the [cassandra-test-image](../cassandra-test-image/src/main/docker/docker-compose.yml). To set up the cluster, navigate to the directory containing the docker-compose.yml file and execute the following command: + +```bash +docker-compose -f docker-compose.yml up --build +``` + +For integration tests, the system uses the [AbstractCassandraCluster](../cassandra-test-image/src/test/java/cassandracluster/AbstractCassandraCluster.java) class. This class can be imported into other modules by including the cassandra-test-image test package in the `pom.xml`, as follows: + +```xml + + com.ericsson.bss.cassandra.ecchronos + cassandra-test-image + tests + ${project.version} + test + +``` + +Before executing tests in other modules, ensure that the cassandra-test-image JAR file has been generated. 
This can be done by running the following Maven command: + +```bash +mvn package -Dbuild-cassandra-test-jar -DskipTests +``` ### Integration tests diff --git a/pom.xml b/pom.xml index af7b56c06..62a10baaa 100644 --- a/pom.xml +++ b/pom.xml @@ -69,6 +69,7 @@ + cassandra-test-image connection connection.impl application @@ -541,6 +542,7 @@ limitations under the License. check_style.xml .github/workflows/actions.yml .github/workflows/scorecard.yml + **/docker/* From d9439b7730d9caac569c5b9066bed4aeb41fcd53 Mon Sep 17 00:00:00 2001 From: VictorCavichioli Date: Mon, 2 Sep 2024 08:58:41 -0300 Subject: [PATCH 2/2] Fix Mistakes on Docs and Headers --- cassandra-test-image/src/main/docker/ecc-entrypoint.sh | 2 +- cassandra-test-image/src/main/docker/setup_db.sh | 2 +- .../src/main/resources/generate_certificates.sh | 2 +- .../java/cassandracluster/AbstractCassandraCluster.java | 6 ++++-- docs/ARCHITECTURE.md | 8 ++++---- 5 files changed, 11 insertions(+), 9 deletions(-) diff --git a/cassandra-test-image/src/main/docker/ecc-entrypoint.sh b/cassandra-test-image/src/main/docker/ecc-entrypoint.sh index 05650b00e..5df0e3f16 100755 --- a/cassandra-test-image/src/main/docker/ecc-entrypoint.sh +++ b/cassandra-test-image/src/main/docker/ecc-entrypoint.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2020 Telefonaktiebolaget LM Ericsson +# Copyright 2024 Telefonaktiebolaget LM Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/cassandra-test-image/src/main/docker/setup_db.sh b/cassandra-test-image/src/main/docker/setup_db.sh index d80f33d34..883c57207 100755 --- a/cassandra-test-image/src/main/docker/setup_db.sh +++ b/cassandra-test-image/src/main/docker/setup_db.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2020 Telefonaktiebolaget LM Ericsson +# Copyright 2024 Telefonaktiebolaget LM Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra-test-image/src/main/resources/generate_certificates.sh b/cassandra-test-image/src/main/resources/generate_certificates.sh index c8cca5173..e90e7d284 100755 --- a/cassandra-test-image/src/main/resources/generate_certificates.sh +++ b/cassandra-test-image/src/main/resources/generate_certificates.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2020 Telefonaktiebolaget LM Ericsson +# Copyright 2024 Telefonaktiebolaget LM Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/cassandra-test-image/src/test/java/cassandracluster/AbstractCassandraCluster.java b/cassandra-test-image/src/test/java/cassandracluster/AbstractCassandraCluster.java index 5644c438d..3f9d9f98d 100644 --- a/cassandra-test-image/src/test/java/cassandracluster/AbstractCassandraCluster.java +++ b/cassandra-test-image/src/test/java/cassandracluster/AbstractCassandraCluster.java @@ -22,10 +22,13 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.testcontainers.containers.DockerComposeContainer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class AbstractCassandraCluster { private static DockerComposeContainer composeContainer; + private static final Logger LOG = LoggerFactory.getLogger(AbstractCassandraCluster.class); protected static String containerIP; protected static CqlSession mySession; @@ -36,11 +39,10 @@ public static void setup() throws InterruptedException .toAbsolutePath() .getParent() .resolve("cassandra-test-image/src/main/docker/docker-compose.yml"); - System.out.println("DockerComposePath que veio foi" + dockerComposePath.toFile()); composeContainer = new DockerComposeContainer<>(dockerComposePath.toFile()); composeContainer.start(); - System.out.println("Waiting Cassandra Cluster finish to start"); + LOG.info("Waiting for the Cassandra cluster to finish starting up."); Thread.sleep(50000); containerIP = composeContainer.getContainerByServiceName("cassandra-seed-dc1-rack1-node1").get() diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index 8b57cc93d..58d55d713 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -40,9 +40,9 @@ ecChronos is built to be continuously repairing data in the background. Each ecC ### Connection -Once ecChronos establishes its initial connection with the `contactPoints`, it must register its control over the nodes based on the `type` property, whether JMX or CQL, to make it clear that a single instance will be responsible for managing multiple nodes. 
Then it would be possible to keep track of what was the last time that the EcChronos instances was able to connect with a node, also for others EcChronos instances keep track about each other's health.
+Once ecChronos establishes its initial connection with the `contactPoints`, it must register its control over the nodes based on the `type` property, whether JMX or CQL, to make it clear that a single instance will be responsible for managing multiple nodes. Then it would be possible to keep track of the last time that each ecChronos instance was able to connect with a node, and also for other ecChronos instances to keep track of each other's health.
 
-If type is `datacenterAware`, ecChronos will register its control over all the nodes in the specified datacenter; The `rackAware` declares that ecChronos is responsible just for a sub-set of racks in the declared list; The `hostAware` funcionality declares that ecChronos is resposible just for the specified hosts list. When connection.cql.agent.enabled is true, it must use the AgentNativeConnectionProvider class as default provider.
+If type is `datacenterAware`, ecChronos will register its control over all the nodes in the specified datacenter; The `rackAware` declares ecChronos is responsible just for a sub-set of racks in the declared list; The `hostAware` functionality declares ecChronos is responsible just for the specified hosts list. When connection.cql.agent.enabled is true, it must use the AgentNativeConnectionProvider class as default provider.
 
 Configuration is available on ecc.yml in the below format.
@@ -81,7 +81,7 @@ connection:
 
 ### Nodes Sync
 
-To keep track about nodes and instances, the Agent implementation uses the table nodes_sync, that declared nodes by specific ecChronos instances, so to use the Agent is mandatory to create the table below:
+To keep track of nodes and instances, the Agent implementation uses the table nodes_sync, which declares the nodes managed by specific ecChronos instances, so to use the Agent it is mandatory to create the table below:
 
 ```cql
 CREATE TABLE ecchronos.nodes_sync
@@ -107,7 +107,7 @@ CREATE TABLE ecchronos.nodes_sync
 
 ### Leases
 
-A lock table in cassandra makes sure that only a subset of repairs run at any one time. Repairs can be configured to run only during certain time periods or not at all. Settings for backpressure are provided to make sure repair is spread out over the interval time while alarms are provided to signal when a job has not run for longer than expected.
+A lock table in Cassandra makes sure only a subset of repairs run at any one time. Repairs can be configured to run only during certain time periods or not at all. Settings for backpressure are provided to make sure repair is spread out over the interval time while alarms are provided to signal when a job has not run for longer than expected.