From 91a93dacb84eae4f09decbabe54771585d42b570 Mon Sep 17 00:00:00 2001 From: Rampreeth Ethiraj Date: Tue, 18 Feb 2025 00:07:48 +0530 Subject: [PATCH 01/48] Fix Segment Replication stats throwing NPE (#14580) Signed-off-by: Rampreeth Ethiraj --- .../java/org/opensearch/index/seqno/ReplicationTracker.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index c0bb52b6b43bc..76ef45158e3d5 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -1254,8 +1254,9 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() { // skip any shard that is a relocating primary or search only replica (not tracked by primary) private boolean shouldSkipReplicationTimer(String allocationId) { - Optional shardRouting = routingTable.shards() + Optional shardRouting = routingTable.assignedShards() .stream() + .filter(routing -> Objects.nonNull(routing.allocationId())) .filter(routing -> routing.allocationId().getId().equals(allocationId)) .findAny(); return shardRouting.isPresent() && (shardRouting.get().primary() || shardRouting.get().isSearchOnly()); From e62bf1a6b5e87cf6d138ddeecfca255fe0c4aa07 Mon Sep 17 00:00:00 2001 From: "Samuel.G" <1148690954@qq.com> Date: Wed, 19 Feb 2025 02:23:05 +0900 Subject: [PATCH 02/48] Wildcard field use only 3-gram to index (#17349) * support 3gram wildcard Signed-off-by: gesong.samuel * add changelog-3 Signed-off-by: gesong.samuel * add rolling upgrade test for wildcard field Signed-off-by: gesong.samuel * remove test case added in #16827 Signed-off-by: gesong.samuel --------- Signed-off-by: gesong.samuel Co-authored-by: gesong.samuel --- CHANGELOG-3.0.md | 1 + .../test/mixed_cluster/40_wildcard.yml | 200 +++++++++++++++ .../test/old_cluster/40_wildcard.yml | 235 ++++++++++++++++++ .../test/upgraded_cluster/40_wildcard.yml | 200 +++++++++++++++ .../index/mapper/WildcardFieldMapper.java | 134 ++++------ .../mapper/WildcardFieldMapperTests.java | 37 +-- .../index/mapper/WildcardFieldTypeTests.java | 38 +-- 7 files changed, 715 insertions(+), 130 deletions(-) create mode 100644 qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_wildcard.yml create mode 100644 qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_wildcard.yml create mode 100644 qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_wildcard.yml diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index c5f9611910fa9..bc5e63dbdf8ce 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -37,6 +37,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Stop minimizing automata used for case-insensitive matches ([#17268](https://github.com/opensearch-project/OpenSearch/pull/17268)) - Refactor the `:server` module `org.opensearch.client` to `org.opensearch.transport.client` to eliminate top level split packages for JPMS support ([#17272](https://github.com/opensearch-project/OpenSearch/pull/17272)) - Use Lucene `BM25Similarity` as default since the `LegacyBM25Similarity` is marked as deprecated ([#17306](https://github.com/opensearch-project/OpenSearch/pull/17306)) +- Wildcard field index only 3gram of the input data [#17349](https://github.com/opensearch-project/OpenSearch/pull/17349) ### Deprecated diff --git 
a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_wildcard.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_wildcard.yml new file mode 100644 index 0000000000000..e06854af7e924 --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_wildcard.yml @@ -0,0 +1,200 @@ +# refactored from rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml +--- +"search on mixed state": + # "term query matches exact value" + - do: + search: + index: test + body: + query: + term: + my_field: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + term: + my_field.doc_values: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + # term query matches lowercase-normalized value + - do: + search: + index: test + body: + query: + term: + my_field.lower: "abcd" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } + + - do: + search: + index: test + body: + query: + term: + my_field.lower: "ABCD" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } + + - do: + search: + index: test + body: + query: + term: + my_field: "abcd" + - match: { hits.total.value: 0 } + + # wildcard query matches + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*Node*Exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + # wildcard query matches lowercase-normalized field + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*node*exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*NODE*EXCEPTION*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*node*exception*" + - match: { hits.total.value: 0 } + + # prefix query matches + - do: + search: + index: test + body: + query: + prefix: + my_field: + value: "[2024-06-08T" + - match: { hits.total.value: 3 } + + # regexp query matches + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*cluster-manager node.*" + - match: { hits.total.value: 2 } + + # regexp query matches lowercase-normalized field + - do: + search: + index: test + body: + query: + regexp: + my_field.lower: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 2 } + + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 0 } + + # wildcard match-all works + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*" + - match: { hits.total.value: 6 } + + # regexp match-all works + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*" + - match: { hits.total.value: 6 } + + # terms query on wildcard field matches + - do: + search: + index: test + body: + query: + terms: { my_field: [ "AbCd" ] } + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + # case insensitive query on wildcard field + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: 
+ search: + index: test + body: + query: + wildcard: + my_field: + value: "AbCd" + case_insensitive: true + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_wildcard.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_wildcard.yml new file mode 100644 index 0000000000000..b19882c69ddd7 --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_wildcard.yml @@ -0,0 +1,235 @@ +# refactored from rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml +--- +"Create index with Wildcard field": + - do: + indices.create: + index: test + body: + mappings: + properties: + my_field: + type: wildcard + fields: + lower: + type: wildcard + normalizer: lowercase + doc_values: + type: wildcard + doc_values: true + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test", "_id":1}}' + - '{"my_field": "org.opensearch.transport.NodeDisconnectedException: [node_s0][127.0.0.1:39953][disconnected] disconnected"}' + - '{"index": {"_index": "test", "_id":2}}' + - '{"my_field": "[2024-06-08T06:31:37,443][INFO ][o.o.c.c.Coordinator ] [node_s2] cluster-manager node [{node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true}] failed, restarting discovery"}' + - '{"index": {"_index": "test", "_id":3}}' + - '{"my_field": "[2024-06-08T06:31:37,451][INFO ][o.o.c.s.ClusterApplierService] [node_s2] cluster-manager node changed {previous [{node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true}], current []}, term: 1, version: 24, reason: becoming candidate: onLeaderFailure"}' + - '{"index": {"_index": "test", "_id":4}}' + - '{"my_field": "[2024-06-08T06:31:37,452][WARN ][o.o.c.NodeConnectionsService] [node_s1] failed to connect to {node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true} (tried [1] times)"}' + - '{"index": {"_index": "test", "_id":5}}' + - '{"my_field": "AbCd"}' + - '{"index": {"_index": "test", "_id":6}}' + - '{"other_field": "test"}' + - '{"index": {"_index": "test", "_id":7}}' + - '{"my_field": "ABCD"}' + + # "term query matches exact value" + - do: + search: + index: test + body: + query: + term: + my_field: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + term: + my_field.doc_values: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + # term query matches lowercase-normalized value + - do: + search: + index: test + body: + query: + term: + my_field.lower: "abcd" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } + + - do: + search: + index: test + body: + query: + term: + my_field.lower: "ABCD" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } + + - do: + search: + index: test + body: + query: + term: + my_field: "abcd" + - match: { hits.total.value: 0 } + + # wildcard query matches + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*Node*Exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + # wildcard query matches lowercase-normalized field + - do: + 
search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*node*exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*NODE*EXCEPTION*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*node*exception*" + - match: { hits.total.value: 0 } + + # prefix query matches + - do: + search: + index: test + body: + query: + prefix: + my_field: + value: "[2024-06-08T" + - match: { hits.total.value: 3 } + + # regexp query matches + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*cluster-manager node.*" + - match: { hits.total.value: 2 } + + # regexp query matches lowercase-normalized field + - do: + search: + index: test + body: + query: + regexp: + my_field.lower: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 2 } + + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 0 } + + # wildcard match-all works + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*" + - match: { hits.total.value: 6 } + + # regexp match-all works + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*" + - match: { hits.total.value: 6 } + + # terms query on wildcard field matches + - do: + search: + index: test + body: + query: + terms: { my_field: [ "AbCd" ] } + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + # case insensitive query on wildcard field + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "AbCd" + case_insensitive: true + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_wildcard.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_wildcard.yml new file mode 100644 index 0000000000000..29518931a5b8b --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_wildcard.yml @@ -0,0 +1,200 @@ +# refactored from rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml +--- +"search after upgrade": + # "term query matches exact value" + - do: + search: + index: test + body: + query: + term: + my_field: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + term: + my_field.doc_values: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + # term query matches lowercase-normalized value + - do: + search: + index: test + body: + query: + term: + my_field.lower: "abcd" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } + + - do: + search: + index: test + body: + query: + term: + my_field.lower: "ABCD" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } + + - do: + search: + index: test + body: + query: + term: + my_field: "abcd" + - match: { hits.total.value: 0 } + + # wildcard query matches + - 
do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*Node*Exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + # wildcard query matches lowercase-normalized field + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*node*exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*NODE*EXCEPTION*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*node*exception*" + - match: { hits.total.value: 0 } + + # prefix query matches + - do: + search: + index: test + body: + query: + prefix: + my_field: + value: "[2024-06-08T" + - match: { hits.total.value: 3 } + + # regexp query matches + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*cluster-manager node.*" + - match: { hits.total.value: 2 } + + # regexp query matches lowercase-normalized field + - do: + search: + index: test + body: + query: + regexp: + my_field.lower: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 2 } + + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 0 } + + # wildcard match-all works + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*" + - match: { hits.total.value: 6 } + + # regexp match-all works + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*" + - match: { hits.total.value: 6 } + + # terms query on wildcard field matches + - do: + search: + index: test + body: + query: + terms: { my_field: [ "AbCd" ] } + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + # case insensitive query on wildcard field + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "AbCd" + case_insensitive: true + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } diff --git a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java index 07dbe695bbbbb..20c5ce87ad1c7 100644 --- a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java @@ -159,6 +159,7 @@ public WildcardFieldMapper build(BuilderContext context) { } + public static final int NGRAM_SIZE = 3; public static final String CONTENT_TYPE = "wildcard"; public static final TypeParser PARSER = new TypeParser((n, c) -> new WildcardFieldMapper.Builder(n, c.getIndexAnalyzers())); @@ -230,97 +231,49 @@ protected void parseCreateField(ParseContext context) throws IOException { /** * Tokenizer to emit tokens to support wildcard first-phase matching. *

- * Will emit all substrings of length 1,2, and 3, with 0-valued anchors for the prefix/suffix.
+ * Will emit only substrings of length 3 (NGRAM_SIZE), with 0-valued anchors for the prefix/suffix.
 *

* For example, given the string "lucene", output the following terms: *

- * [0, 'l'] + * [0, 0, 'l'] * [0, 'l', 'u'] - * ['l'] - * ['l', 'u'] * ['l', 'u', 'c'] - * ['u'] - * ['u','c'] * ['u','c','e'] - * ['c'] - * ['c', 'e'] * ['c', 'e', 'n'] - * ['e'] - * ['e', 'n'] * ['e', 'n', 'e'] - * ['n'] - * ['n', 'e'] * ['n', 'e', 0] - * ['e'] - * ['e', 0] + * ['e', 0, 0] *

* Visible for testing. */ static final class WildcardFieldTokenizer extends Tokenizer { private final CharTermAttribute charTermAttribute = addAttribute(CharTermAttribute.class); - private final char[] buffer = new char[3]; // Ring buffer for up to 3 chars - private int offset = 0; // Position in the buffer - private int length = 2; // First token is anchor + first char + private final char[] buffer = new char[NGRAM_SIZE]; // Ring buffer for up to 3 chars + private int offset = NGRAM_SIZE - 1; // next position in buffer to store next input char @Override public void reset() throws IOException { super.reset(); - buffer[0] = 0; - int firstChar = input.read(); - if (firstChar != -1) { - buffer[1] = (char) firstChar; - int secondChar = input.read(); - if (secondChar != -1) { - buffer[2] = (char) secondChar; - } else { - buffer[2] = 0; - } - } else { - buffer[1] = 0; + for (int i = 0; i < NGRAM_SIZE - 1; i++) { + buffer[i] = 0; } - } @Override public boolean incrementToken() throws IOException { - charTermAttribute.setLength(length); - int numZeroes = 0; - for (int i = 0; i < length; i++) { - char curChar = buffer[(i + offset) % 3]; - if (curChar == 0) { - numZeroes++; - } - charTermAttribute.buffer()[i] = buffer[(i + offset) % 3]; - } - if (numZeroes == 2) { - // Two zeroes usually means we're done. - if (length == 3 && charTermAttribute.buffer()[1] != 0) { - // The only case where we're not done is if the input has exactly 1 character, so the buffer - // contains 0, char, 0. In that case, we return char now, then return char, 0 on the next iteration - charTermAttribute.buffer()[0] = charTermAttribute.buffer()[1]; - charTermAttribute.buffer()[1] = 0; - charTermAttribute.setLength(1); - length = 2; - offset = 1; - return true; - } - return false; - } - if (length == 3) { - // Read the next character, overwriting the current offset - int nextChar = input.read(); - if (nextChar != -1) { - buffer[offset] = (char) nextChar; - } else { - // End of input. Pad with extra 0 to trigger the logic above. - buffer[offset] = 0; - } - offset = (offset + 1) % 3; - length = 1; - } else { - length = length + 1; + charTermAttribute.setLength(NGRAM_SIZE); + int c = input.read(); + c = c == -1 ? 0 : c; + + buffer[offset++ % NGRAM_SIZE] = (char) c; + boolean has_next = false; + for (int i = 0; i < NGRAM_SIZE; i++) { + char curChar = buffer[(offset + i) % NGRAM_SIZE]; + charTermAttribute.buffer()[i] = curChar; + has_next |= curChar != 0; } - return true; + + return has_next; } } @@ -479,8 +432,8 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo Query approximation; if (requiredNGrams.isEmpty()) { // This only happens when all characters are wildcard characters (* or ?), - // or it's the empty string. - if (value.length() == 0 || value.contains("?")) { + // or it's only contains sequential characters less than NGRAM_SIZE (which defaults to 3). 
+ if (findNonWildcardSequence(value, 0) != value.length() || value.length() == 0 || value.contains("?")) { approximation = this.existsQuery(context); } else { return existsQuery(context); @@ -502,15 +455,20 @@ static Set getRequiredNGrams(String value, boolean regexpMode) { int pos = 0; String rawSequence = null; String currentSequence = null; + char[] buffer = new char[NGRAM_SIZE]; if (!value.startsWith("?") && !value.startsWith("*")) { // Can add prefix term rawSequence = getNonWildcardSequence(value, 0); currentSequence = performEscape(rawSequence, regexpMode); - if (currentSequence.length() == 1) { - terms.add(new String(new char[] { 0, currentSequence.charAt(0) })); - } else { - terms.add(new String(new char[] { 0, currentSequence.charAt(0), currentSequence.charAt(1) })); + + // buffer[0] is automatically set to 0 + Arrays.fill(buffer, (char) 0); + int startIdx = Math.max(NGRAM_SIZE - currentSequence.length(), 1); + for (int j = 0; j < currentSequence.length() && j < NGRAM_SIZE - 1; j++) { + buffer[startIdx + j] = currentSequence.charAt(j); } + + terms.add(new String(buffer)); } else { pos = findNonWildcardSequence(value, pos); rawSequence = getNonWildcardSequence(value, pos); @@ -518,23 +476,27 @@ static Set getRequiredNGrams(String value, boolean regexpMode) { while (pos < value.length()) { boolean isEndOfValue = pos + rawSequence.length() == value.length(); currentSequence = performEscape(rawSequence, regexpMode); - if (!currentSequence.isEmpty() && currentSequence.length() < 3 && !isEndOfValue && pos > 0) { - // If this is a prefix or suffix of length < 3, then we already have a longer token including the anchor. - terms.add(currentSequence); - } else { - for (int i = 0; i < currentSequence.length() - 2; i++) { - terms.add(currentSequence.substring(i, i + 3)); - } + + for (int i = 0; i < currentSequence.length() - NGRAM_SIZE + 1; i++) { + terms.add(currentSequence.substring(i, i + 3)); } if (isEndOfValue) { // This is the end of the input. We can attach a suffix anchor. - if (currentSequence.length() == 1) { - terms.add(new String(new char[] { currentSequence.charAt(0), 0 })); - } else { - char a = currentSequence.charAt(currentSequence.length() - 2); - char b = currentSequence.charAt(currentSequence.length() - 1); - terms.add(new String(new char[] { a, b, 0 })); + // special case when we should generate '0xxxxxxx0', where we have (NGRAM_SIZE - 2) * x + Arrays.fill(buffer, (char) 0); + if (pos == 0 && currentSequence.length() == NGRAM_SIZE - 2) { + for (int i = 0; i < currentSequence.length(); i++) { + buffer[i + 1] = currentSequence.charAt(i); + } + terms.add(new String(buffer)); + Arrays.fill(buffer, (char) 0); + } + int rightStartIdx = NGRAM_SIZE - currentSequence.length() - 2; + rightStartIdx = rightStartIdx < 0 ? 
NGRAM_SIZE - 2 : rightStartIdx; + for (int j = 0; j < currentSequence.length() && j < NGRAM_SIZE - 1; j++) { + buffer[rightStartIdx - j] = currentSequence.charAt(currentSequence.length() - j - 1); } + terms.add(new String(buffer)); } pos = findNonWildcardSequence(value, pos + rawSequence.length()); rawSequence = getNonWildcardSequence(value, pos); diff --git a/server/src/test/java/org/opensearch/index/mapper/WildcardFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/WildcardFieldMapperTests.java index b19e3687cf944..25aacb41f029d 100644 --- a/server/src/test/java/org/opensearch/index/mapper/WildcardFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/WildcardFieldMapperTests.java @@ -82,22 +82,11 @@ public void testTokenizer() throws IOException { List.of( WildcardFieldTypeTests.prefixAnchored("p"), WildcardFieldTypeTests.prefixAnchored("pi"), - "p", - "pi", "pic", - "i", - "ic", "ick", - "c", - "ck", "ckl", - "k", - "kl", "kle", - "l", - "le", WildcardFieldTypeTests.suffixAnchored("le"), - "e", WildcardFieldTypeTests.suffixAnchored("e") ), terms @@ -111,7 +100,14 @@ public void testTokenizer() throws IOException { terms.add(charTermAttribute.toString()); } } - assertEquals(List.of(WildcardFieldTypeTests.prefixAnchored("a"), "a", WildcardFieldTypeTests.suffixAnchored("a")), terms); + assertEquals( + List.of( + WildcardFieldTypeTests.prefixAnchored("a"), + WildcardFieldTypeTests.suffixAnchored((char) 0 + "a"), + WildcardFieldTypeTests.suffixAnchored("a") + ), + terms + ); } public void testEnableDocValues() throws IOException { @@ -188,13 +184,8 @@ public void testNormalizer() throws IOException { List.of( WildcardFieldTypeTests.prefixAnchored("a"), WildcardFieldTypeTests.prefixAnchored("ab"), - "a", - "ab", "abc", - "b", - "bc", WildcardFieldTypeTests.suffixAnchored("bc"), - "c", WildcardFieldTypeTests.suffixAnchored("c") ), terms @@ -242,13 +233,8 @@ public void testNullValue() throws IOException { List.of( WildcardFieldTypeTests.prefixAnchored("u"), WildcardFieldTypeTests.prefixAnchored("ur"), - "u", - "ur", "uri", - "r", - "ri", WildcardFieldTypeTests.suffixAnchored("ri"), - "i", WildcardFieldTypeTests.suffixAnchored("i") ), terms @@ -281,16 +267,9 @@ public void testDefaults() throws Exception { List.of( WildcardFieldTypeTests.prefixAnchored("1"), WildcardFieldTypeTests.prefixAnchored("12"), - "1", - "12", "123", - "2", - "23", "234", - "3", - "34", WildcardFieldTypeTests.suffixAnchored("34"), - "4", WildcardFieldTypeTests.suffixAnchored("4") ), terms diff --git a/server/src/test/java/org/opensearch/index/mapper/WildcardFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/WildcardFieldTypeTests.java index 1a813495e9033..851e791660d82 100644 --- a/server/src/test/java/org/opensearch/index/mapper/WildcardFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/WildcardFieldTypeTests.java @@ -20,11 +20,19 @@ public class WildcardFieldTypeTests extends FieldTypeTestCase { static String prefixAnchored(String val) { - return (char) 0 + val; + String ret = (char) 0 + val; + if (ret.length() < WildcardFieldMapper.NGRAM_SIZE) { + ret = prefixAnchored(ret); + } + return ret; } static String suffixAnchored(String val) { - return val + (char) 0; + String ret = val + (char) 0; + if (ret.length() < WildcardFieldMapper.NGRAM_SIZE) { + ret = suffixAnchored(ret); + } + return ret; } public void testTermQuery() { @@ -104,13 +112,14 @@ public void testEscapedWildcardQuery() { ft.wildcardQuery("\\**\\*", null, null) ); - 
assertEquals(new WildcardFieldMapper.WildcardMatchingQuery("field", builder.build(), "\\*"), ft.wildcardQuery("\\*", null, null)); - - expectedTerms.remove(suffixAnchored("*")); + expectedTerms.add(prefixAnchored("*" + (char) 0)); builder = new BooleanQuery.Builder(); for (String term : expectedTerms) { builder.add(new TermQuery(new Term("field", term)), BooleanClause.Occur.FILTER); } + assertEquals(new WildcardFieldMapper.WildcardMatchingQuery("field", builder.build(), "\\*"), ft.wildcardQuery("\\*", null, null)); + builder = new BooleanQuery.Builder(); + builder.add(new TermQuery(new Term("field", prefixAnchored("*"))), BooleanClause.Occur.FILTER); assertEquals(new WildcardFieldMapper.WildcardMatchingQuery("field", builder.build(), "\\**"), ft.wildcardQuery("\\**", null, null)); } @@ -119,7 +128,6 @@ public void testMultipleWildcardsInQuery() { MappedFieldType ft = new WildcardFieldMapper.WildcardFieldType("field"); Set expectedTerms = new HashSet<>(); expectedTerms.add(prefixAnchored("a")); - expectedTerms.add("cd"); expectedTerms.add("efg"); expectedTerms.add(suffixAnchored("h")); BooleanQuery.Builder builder = new BooleanQuery.Builder(); @@ -153,27 +161,27 @@ public void testRegexpQuery() { assertTrue(actualMatchingQuery.getSecondPhaseMatcher().test("foo_apple_foo")); assertFalse(actualMatchingQuery.getSecondPhaseMatcher().test("foo_apply_foo")); - pattern = "ab(zz|cd|ef.*)(hi|jk)"; + pattern = "abc(zzz|def|ghi.*)(jkl|mno)"; builder = new BooleanQuery.Builder(); - builder.add(new TermQuery(new Term("field", "ab")), BooleanClause.Occur.FILTER); + builder.add(new TermQuery(new Term("field", "abc")), BooleanClause.Occur.FILTER); builder.add( - new BooleanQuery.Builder().add(new TermQuery(new Term("field", "zz")), BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term("field", "cd")), BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term("field", "ef")), BooleanClause.Occur.SHOULD) + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "zzz")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("field", "def")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("field", "ghi")), BooleanClause.Occur.SHOULD) .build(), BooleanClause.Occur.FILTER ); builder.add( - new BooleanQuery.Builder().add(new TermQuery(new Term("field", "hi")), BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term("field", "jk")), BooleanClause.Occur.SHOULD) + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "jkl")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("field", "mno")), BooleanClause.Occur.SHOULD) .build(), BooleanClause.Occur.FILTER ); actual = ft.regexpQuery(pattern, 0, 0, 1000, null, null); assertEquals(new WildcardFieldMapper.WildcardMatchingQuery("field", builder.build(), "/" + pattern + "/"), actual); actualMatchingQuery = (WildcardFieldMapper.WildcardMatchingQuery) actual; - assertTrue(actualMatchingQuery.getSecondPhaseMatcher().test("abcdjk")); - assertTrue(actualMatchingQuery.getSecondPhaseMatcher().test("abefqwertyhi")); + assertTrue(actualMatchingQuery.getSecondPhaseMatcher().test("abcdefmno")); + assertTrue(actualMatchingQuery.getSecondPhaseMatcher().test("abcghiqwertyjkl")); } public void testWildcardMatchAll() { From 43e589a0cad13034d06da528ac76c1b9a341ac4a Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Wed, 19 Feb 2025 18:20:37 -0500 Subject: [PATCH 03/48] Explicitly disable FeatureFlag in MetadataCreateIndexServiceTests.testCreateIndexWithContextDisabled (#17384) * Segregate tests in MetadataCreateIndexServiceTests that rely on 
FeatureFlags being enabled Signed-off-by: Craig Perkins * Remove duplicate methods Signed-off-by: Craig Perkins * Remove unnecessary license Signed-off-by: Craig Perkins * Explicitly disable FF Signed-off-by: Craig Perkins * Remove multiple try blocks Signed-off-by: Craig Perkins * Clean up FF in tearDown Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- .../MetadataCreateIndexServiceTests.java | 264 ++++++++---------- 1 file changed, 124 insertions(+), 140 deletions(-) diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 0bb9ec28a1efc..cc35426ee15b8 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -243,6 +243,14 @@ public void setupCreateIndexRequestAndAliasValidator() { ); } + @After + public void tearDown() throws Exception { + super.tearDown(); + // clear any FeatureFlags needed for individual tests + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + clusterSettings = null; + } + private ClusterState createClusterState(String name, int numShards, int numReplicas, Settings settings) { int numRoutingShards = settings.getAsInt(IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey(), numShards); Metadata.Builder metaBuilder = Metadata.builder(); @@ -2304,6 +2312,8 @@ public void testIndexCreationWithIndexStoreTypeRemoteStoreThrowsException() { } public void testCreateIndexWithContextDisabled() throws Exception { + // Explicitly disable the FF + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, false).build()); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(randomAlphaOfLength(5))); withTemporaryClusterService((clusterService, threadPool) -> { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( @@ -2337,42 +2347,35 @@ public void testCreateIndexWithContextDisabled() throws Exception { public void testCreateIndexWithContextAbsent() throws Exception { FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); - try { - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(randomAlphaOfLength(5))); - withTemporaryClusterService((clusterService, threadPool) -> { - MetadataCreateIndexService checkerService = new MetadataCreateIndexService( - Settings.EMPTY, - clusterService, - indicesServices, - null, - null, - createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), - mock(Environment.class), - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - threadPool, - null, - new SystemIndices(Collections.emptyMap()), - false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE, - repositoriesServiceSupplier - ); - CountDownLatch counter = new CountDownLatch(1); - InvalidIndexContextException exception = expectThrows( - InvalidIndexContextException.class, - () -> checkerService.validateContext(request) - ); - assertTrue( - "Invalid exception message." 
+ exception.getMessage(), - exception.getMessage().contains("index specifies a context which is not loaded on the cluster.") - ); - }); - } finally { - // Disable so that other tests which are not dependent on this are not impacted. - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, false).build() + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(randomAlphaOfLength(5))); + withTemporaryClusterService((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + indicesServices, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + mock(Environment.class), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); - } + CountDownLatch counter = new CountDownLatch(1); + InvalidIndexContextException exception = expectThrows( + InvalidIndexContextException.class, + () -> checkerService.validateContext(request) + ); + assertTrue( + "Invalid exception message." + exception.getMessage(), + exception.getMessage().contains("index specifies a context which is not loaded on the cluster.") + ); + }); } public void testApplyContext() throws IOException { @@ -2420,63 +2423,56 @@ public void testApplyContext() throws IOException { } String contextName = randomAlphaOfLength(5); - try { - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(contextName)); - withTemporaryClusterService((clusterService, threadPool) -> { - MetadataCreateIndexService checkerService = new MetadataCreateIndexService( - Settings.EMPTY, - clusterService, - indicesServices, - null, - null, - createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), - mock(Environment.class), - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - threadPool, - null, - new SystemIndices(Collections.emptyMap()), - false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE, - repositoriesServiceSupplier - ); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(contextName)); + withTemporaryClusterService((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + indicesServices, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + mock(Environment.class), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier + ); - ClusterState mockState = mock(ClusterState.class); - Metadata metadata = mock(Metadata.class); - - when(mockState.metadata()).thenReturn(metadata); - when(metadata.systemTemplatesLookup()).thenReturn(Map.of(contextName, new TreeMap<>() { - { - put(1L, contextName); - } - })); - when(metadata.componentTemplates()).thenReturn(Map.of(contextName, componentTemplate.get())); - - try { - Template template = checkerService.applyContext(request, 
mockState, allMappings, settingsBuilder); - assertEquals(componentTemplate.get().template(), template); - - assertEquals(2, allMappings.size()); - assertEquals(mappings, allMappings.get(0)); - assertEquals( - MapperService.parseMapping(NamedXContentRegistry.EMPTY, componentTemplate.get().template().mappings().toString()), - allMappings.get(1) - ); - - assertEquals("60s", settingsBuilder.get(INDEX_REFRESH_INTERVAL_SETTING.getKey())); - assertEquals("log_byte_size", settingsBuilder.get(INDEX_MERGE_POLICY.getKey())); - assertEquals("best_compression", settingsBuilder.get(EngineConfig.INDEX_CODEC_SETTING.getKey())); - assertEquals("false", settingsBuilder.get(INDEX_SOFT_DELETES_SETTING.getKey())); - } catch (IOException ex) { - throw new AssertionError(ex); + ClusterState mockState = mock(ClusterState.class); + Metadata metadata = mock(Metadata.class); + + when(mockState.metadata()).thenReturn(metadata); + when(metadata.systemTemplatesLookup()).thenReturn(Map.of(contextName, new TreeMap<>() { + { + put(1L, contextName); } - }); - } finally { - // Disable so that other tests which are not dependent on this are not impacted. - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, false).build() - ); - } + })); + when(metadata.componentTemplates()).thenReturn(Map.of(contextName, componentTemplate.get())); + + try { + Template template = checkerService.applyContext(request, mockState, allMappings, settingsBuilder); + assertEquals(componentTemplate.get().template(), template); + + assertEquals(2, allMappings.size()); + assertEquals(mappings, allMappings.get(0)); + assertEquals( + MapperService.parseMapping(NamedXContentRegistry.EMPTY, componentTemplate.get().template().mappings().toString()), + allMappings.get(1) + ); + + assertEquals("60s", settingsBuilder.get(INDEX_REFRESH_INTERVAL_SETTING.getKey())); + assertEquals("log_byte_size", settingsBuilder.get(INDEX_MERGE_POLICY.getKey())); + assertEquals("best_compression", settingsBuilder.get(EngineConfig.INDEX_CODEC_SETTING.getKey())); + assertEquals("false", settingsBuilder.get(INDEX_SOFT_DELETES_SETTING.getKey())); + } catch (IOException ex) { + throw new AssertionError(ex); + } + }); } public void testApplyContextWithSettingsOverlap() throws IOException { @@ -2508,55 +2504,48 @@ public void testApplyContextWithSettingsOverlap() throws IOException { } String contextName = randomAlphaOfLength(5); - try { - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(contextName)); - withTemporaryClusterService((clusterService, threadPool) -> { - MetadataCreateIndexService checkerService = new MetadataCreateIndexService( - Settings.EMPTY, - clusterService, - indicesServices, - null, - null, - createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), - mock(Environment.class), - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - threadPool, - null, - new SystemIndices(Collections.emptyMap()), - false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE, - repositoriesServiceSupplier - ); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(contextName)); + withTemporaryClusterService((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + indicesServices, + null, + null, + createTestShardLimitService(randomIntBetween(1, 
1000), false, clusterService), + mock(Environment.class), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier + ); - ClusterState mockState = mock(ClusterState.class); - Metadata metadata = mock(Metadata.class); + ClusterState mockState = mock(ClusterState.class); + Metadata metadata = mock(Metadata.class); - when(mockState.metadata()).thenReturn(metadata); - when(metadata.systemTemplatesLookup()).thenReturn(Map.of(contextName, new TreeMap<>() { - { - put(1L, contextName); - } - })); - when(metadata.componentTemplates()).thenReturn(Map.of(contextName, componentTemplate.get())); + when(mockState.metadata()).thenReturn(metadata); + when(metadata.systemTemplatesLookup()).thenReturn(Map.of(contextName, new TreeMap<>() { + { + put(1L, contextName); + } + })); + when(metadata.componentTemplates()).thenReturn(Map.of(contextName, componentTemplate.get())); - ValidationException validationException = expectThrows( - ValidationException.class, - () -> checkerService.applyContext(request, mockState, List.of(), settingsBuilder) - ); - assertEquals(1, validationException.validationErrors().size()); - assertTrue( - "Invalid exception message: " + validationException.getMessage(), - validationException.getMessage() - .contains("Cannot apply context template as user provide settings have overlap with the included context template") - ); - }); - } finally { - // Disable so that other tests which are not dependent on this are not impacted. - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, false).build() + ValidationException validationException = expectThrows( + ValidationException.class, + () -> checkerService.applyContext(request, mockState, List.of(), settingsBuilder) ); - } + assertEquals(1, validationException.validationErrors().size()); + assertTrue( + "Invalid exception message: " + validationException.getMessage(), + validationException.getMessage() + .contains("Cannot apply context template as user provide settings have overlap with the included context template") + ); + }); } private IndexTemplateMetadata addMatchingTemplate(Consumer configurator) { @@ -2632,9 +2621,4 @@ private DiscoveryNode getRemoteNode() { ); } - @After - public void shutdown() throws Exception { - clusterSettings = null; - } - } From e3a6ccadc942c64e83bd224031bc4d1c6ab14623 Mon Sep 17 00:00:00 2001 From: Asim M Date: Thu, 20 Feb 2025 00:41:50 +0000 Subject: [PATCH 04/48] Introduce `execution_hint` for Cardinality aggregation (#17312) --------- Signed-off-by: Siddharth Rayabharam Signed-off-by: Asim Mahmood Signed-off-by: Asim M Co-authored-by: Siddharth Rayabharam Co-authored-by: Craig Perkins --- CHANGELOG-3.0.md | 1 + .../CardinalityAggregationBuilder.java | 39 +++-- .../metrics/CardinalityAggregator.java | 20 ++- .../metrics/CardinalityAggregatorFactory.java | 36 ++++- .../CardinalityAggregatorSupplier.java | 3 +- .../metrics/CardinalityAggregatorTests.java | 137 ++++++++++++++++++ .../aggregations/AggregatorTestCase.java | 8 +- 7 files changed, 222 insertions(+), 22 deletions(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index bc5e63dbdf8ce..67fef874777e2 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -16,6 +16,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Views, simplify data 
access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) - Added pull-based Ingestion (APIs, for ingestion source, a Kafka plugin, and IngestionEngine that pulls data from the ingestion source) ([#16958](https://github.com/opensearch-project/OpenSearch/pull/16958)) - Added ConfigurationUtils to core for the ease of configuration parsing [#17223](https://github.com/opensearch-project/OpenSearch/pull/17223) +- Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index a7516a6fd6b24..f77bbfbd48461 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -68,6 +68,7 @@ public final class CardinalityAggregationBuilder extends ValuesSourceAggregation private static final ParseField REHASH = new ParseField("rehash").withAllDeprecated("no replacement - values will always be rehashed"); public static final ParseField PRECISION_THRESHOLD_FIELD = new ParseField("precision_threshold"); + public static final ParseField EXECUTION_HINT_FIELD = new ParseField("execution_hint"); public static final ObjectParser PARSER = ObjectParser.fromBuilder( NAME, @@ -76,6 +77,7 @@ public final class CardinalityAggregationBuilder extends ValuesSourceAggregation static { ValuesSourceAggregationBuilder.declareFields(PARSER, true, false, false); PARSER.declareLong(CardinalityAggregationBuilder::precisionThreshold, CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD); + PARSER.declareString(CardinalityAggregationBuilder::executionHint, CardinalityAggregationBuilder.EXECUTION_HINT_FIELD); PARSER.declareLong((b, v) -> {/*ignore*/}, REHASH); } @@ -85,6 +87,8 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { private Long precisionThreshold = null; + private String executionHint = null; + public CardinalityAggregationBuilder(String name) { super(name); } @@ -96,6 +100,7 @@ public CardinalityAggregationBuilder( ) { super(clone, factoriesBuilder, metadata); this.precisionThreshold = clone.precisionThreshold; + this.executionHint = clone.executionHint; } @Override @@ -111,6 +116,9 @@ public CardinalityAggregationBuilder(StreamInput in) throws IOException { if (in.readBoolean()) { precisionThreshold = in.readLong(); } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + executionHint = in.readOptionalString(); + } } @Override @@ -125,6 +133,9 @@ protected void innerWriteTo(StreamOutput out) throws IOException { if (hasPrecisionThreshold) { out.writeLong(precisionThreshold); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalString(executionHint); + } } @Override @@ -146,13 +157,9 @@ public CardinalityAggregationBuilder precisionThreshold(long precisionThreshold) return this; } - /** - * Get the precision threshold. Higher values improve accuracy but also - * increase memory usage. Will return null if the - * precisionThreshold has not been set yet. 
- */ - public Long precisionThreshold() { - return precisionThreshold; + public CardinalityAggregationBuilder executionHint(String executionHint) { + this.executionHint = executionHint; + return this; } @Override @@ -162,7 +169,16 @@ protected CardinalityAggregatorFactory innerBuild( AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder ) throws IOException { - return new CardinalityAggregatorFactory(name, config, precisionThreshold, queryShardContext, parent, subFactoriesBuilder, metadata); + return new CardinalityAggregatorFactory( + name, + config, + precisionThreshold, + queryShardContext, + parent, + subFactoriesBuilder, + metadata, + executionHint + ); } @Override @@ -170,12 +186,15 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th if (precisionThreshold != null) { builder.field(PRECISION_THRESHOLD_FIELD.getPreferredName(), precisionThreshold); } + if (executionHint != null) { + builder.field(EXECUTION_HINT_FIELD.getPreferredName(), executionHint); + } return builder; } @Override public int hashCode() { - return Objects.hash(super.hashCode(), precisionThreshold); + return Objects.hash(super.hashCode(), precisionThreshold, executionHint); } @Override @@ -184,7 +203,7 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; if (super.equals(obj) == false) return false; CardinalityAggregationBuilder other = (CardinalityAggregationBuilder) obj; - return Objects.equals(precisionThreshold, other.precisionThreshold); + return Objects.equals(precisionThreshold, other.precisionThreshold) && Objects.equals(executionHint, other.executionHint); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java index d578c37af8818..f95dbf67fe8af 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java @@ -89,6 +89,7 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue private static final Logger logger = LogManager.getLogger(CardinalityAggregator.class); + private final CardinalityAggregatorFactory.ExecutionMode executionMode; private final int precision; private final ValuesSource valuesSource; @@ -113,7 +114,8 @@ public CardinalityAggregator( int precision, SearchContext context, Aggregator parent, - Map metadata + Map metadata, + CardinalityAggregatorFactory.ExecutionMode executionMode ) throws IOException { super(name, context, parent, metadata); // TODO: Stop using nulls here @@ -121,6 +123,7 @@ public CardinalityAggregator( this.precision = precision; this.counts = valuesSource == null ? 
null : new HyperLogLogPlusPlus(precision, context.bigArrays(), 1); this.valuesSourceConfig = valuesSourceConfig; + this.executionMode = executionMode; } @Override @@ -144,14 +147,17 @@ private Collector pickCollector(LeafReaderContext ctx) throws IOException { } Collector collector = null; - if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals) { - ValuesSource.Bytes.WithOrdinals source = (ValuesSource.Bytes.WithOrdinals) valuesSource; + if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals source) { final SortedSetDocValues ordinalValues = source.ordinalsValues(ctx); final long maxOrd = ordinalValues.getValueCount(); if (maxOrd == 0) { emptyCollectorsUsed++; return new EmptyCollector(); - } else { + } else if (executionMode == CardinalityAggregatorFactory.ExecutionMode.ORDINALS) { // Force OrdinalsCollector + ordinalsCollectorsUsed++; + collector = new OrdinalsCollector(counts, ordinalValues, context.bigArrays()); + } else if (executionMode == null) { + // no hint provided, fall back to heuristics final long ordinalsMemoryUsage = OrdinalsCollector.memoryOverhead(maxOrd); final long countsMemoryUsage = HyperLogLogPlusPlus.memoryUsage(precision); // only use ordinals if they don't increase memory usage by more than 25% @@ -164,7 +170,7 @@ private Collector pickCollector(LeafReaderContext ctx) throws IOException { } } - if (collector == null) { // not able to build an OrdinalsCollector + if (collector == null) { // not able to build an OrdinalsCollector, or hint is direct stringHashingCollectorsUsed++; collector = new DirectCollector(counts, MurmurHash3Values.hash(valuesSource.bytesValues(ctx))); } @@ -480,7 +486,7 @@ public void close() { * * @opensearch.internal */ - private static class DirectCollector extends Collector { + static class DirectCollector extends Collector { private final MurmurHash3Values hashes; private final HyperLogLogPlusPlus counts; @@ -517,7 +523,7 @@ public void close() { * * @opensearch.internal */ - private static class OrdinalsCollector extends Collector { + static class OrdinalsCollector extends Collector { private static final long SHALLOW_FIXEDBITSET_SIZE = RamUsageEstimator.shallowSizeOfInstance(FixedBitSet.class); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java index 980667b45324e..3d82386d12e57 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java @@ -44,6 +44,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Locale; import java.util.Map; /** @@ -53,6 +54,33 @@ */ class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory { + /** + * Execution mode for cardinality agg + * + * @opensearch.internal + */ + public enum ExecutionMode { + DIRECT, + ORDINALS; + + ExecutionMode() {} + + public static ExecutionMode fromString(String value) { + try { + return ExecutionMode.valueOf(value.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Unknown execution_hint: [" + value + "], expected any of [direct, ordinals]"); + } + } + + @Override + public String toString() { + return this.name().toLowerCase(Locale.ROOT); + } + } + + private final ExecutionMode executionMode; + private final Long precisionThreshold; 
CardinalityAggregatorFactory( @@ -62,10 +90,12 @@ class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory { QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metadata + Map metadata, + String executionHint ) throws IOException { super(name, config, queryShardContext, parent, subFactoriesBuilder, metadata); this.precisionThreshold = precisionThreshold; + this.executionMode = executionHint == null ? null : ExecutionMode.fromString(executionHint); } public static void registerAggregators(ValuesSourceRegistry.Builder builder) { @@ -74,7 +104,7 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { - return new CardinalityAggregator(name, config, precision(), searchContext, parent, metadata); + return new CardinalityAggregator(name, config, precision(), searchContext, parent, metadata, executionMode); } @Override @@ -86,7 +116,7 @@ protected Aggregator doCreateInternal( ) throws IOException { return queryShardContext.getValuesSourceRegistry() .getAggregator(CardinalityAggregationBuilder.REGISTRY_KEY, config) - .build(name, config, precision(), searchContext, parent, metadata); + .build(name, config, precision(), searchContext, parent, metadata, executionMode); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorSupplier.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorSupplier.java index d5cb0242762fd..42426697e7629 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorSupplier.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorSupplier.java @@ -51,6 +51,7 @@ Aggregator build( int precision, SearchContext context, Aggregator parent, - Map metadata + Map metadata, + CardinalityAggregatorFactory.ExecutionMode executionMode ) throws IOException; } diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java index 060e06f7336b3..ca65c888f3363 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java @@ -37,6 +37,7 @@ import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.KeywordField; import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; @@ -66,6 +67,7 @@ import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.InternalAggregation; +import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.aggregations.support.AggregationInspectionHelper; @@ -497,4 +499,139 @@ protected CountingAggregator createCountingAggregator( ) ); } + + private void 
diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java
index 060e06f7336b3..ca65c888f3363 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java
@@ -37,6 +37,7 @@
 import org.apache.lucene.document.IntPoint;
 import org.apache.lucene.document.KeywordField;
 import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.DirectoryReader;
@@ -66,6 +67,7 @@
 import org.opensearch.search.aggregations.AggregationBuilder;
 import org.opensearch.search.aggregations.AggregatorTestCase;
 import org.opensearch.search.aggregations.InternalAggregation;
+import org.opensearch.search.aggregations.LeafBucketCollector;
 import org.opensearch.search.aggregations.MultiBucketConsumerService;
 import org.opensearch.search.aggregations.pipeline.PipelineAggregator;
 import org.opensearch.search.aggregations.support.AggregationInspectionHelper;
@@ -497,4 +499,139 @@ protected CountingAggregator createCountingAggregator(
             )
         );
     }
+
+    private void testAggregationExecutionHint(
+        AggregationBuilder aggregationBuilder,
+        Query query,
+        CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
+        Consumer<InternalCardinality> verify,
+        Consumer<LeafBucketCollector> verifyCollector,
+        MappedFieldType fieldType
+    ) throws IOException {
+        try (Directory directory = newDirectory()) {
+            RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+            buildIndex.accept(indexWriter);
+            indexWriter.close();
+
+            try (IndexReader indexReader = DirectoryReader.open(directory)) {
+                IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
+
+                CountingAggregator aggregator = new CountingAggregator(
+                    new AtomicInteger(),
+                    createAggregator(aggregationBuilder, indexSearcher, fieldType)
+                );
+                aggregator.preCollection();
+                indexSearcher.search(query, aggregator);
+                aggregator.postCollection();
+
+                MultiBucketConsumerService.MultiBucketConsumer reduceBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer(
+                    Integer.MAX_VALUE,
+                    new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST)
+                );
+                InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forFinalReduction(
+                    aggregator.context().bigArrays(),
+                    getMockScriptService(),
+                    reduceBucketConsumer,
+                    PipelineAggregator.PipelineTree.EMPTY
+                );
+                InternalCardinality topLevel = (InternalCardinality) aggregator.buildTopLevel();
+                InternalCardinality card = (InternalCardinality) topLevel.reduce(Collections.singletonList(topLevel), context);
+                doAssertReducedMultiBucketConsumer(card, reduceBucketConsumer);
+
+                verify.accept(card);
+                verifyCollector.accept(aggregator.getSelectedCollector());
+            }
+        }
+    }
+
+    public void testInvalidExecutionHint() throws IOException {
+        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("number", NumberFieldMapper.NumberType.LONG);
+        final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("number")
+            .executionHint("invalid");
+        assertThrows(IllegalArgumentException.class, () -> testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> {
+            iw.addDocument(singleton(new NumericDocValuesField("number", 7)));
+            iw.addDocument(singleton(new NumericDocValuesField("number", 8)));
+            iw.addDocument(singleton(new NumericDocValuesField("number", 9)));
+        }, card -> {
+            assertEquals(3, card.getValue(), 0);
+            assertTrue(AggregationInspectionHelper.hasValue(card));
+        }, collector -> { assertTrue(collector instanceof CardinalityAggregator.DirectCollector); }, fieldType));
+    }
+
+    public void testNoExecutionHintWithNumericDocValues() throws IOException {
+        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("number", NumberFieldMapper.NumberType.LONG);
+        final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("number");
+        testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> {
+            iw.addDocument(singleton(new NumericDocValuesField("number", 7)));
+            iw.addDocument(singleton(new NumericDocValuesField("number", 8)));
+            iw.addDocument(singleton(new NumericDocValuesField("number", 9)));
+        }, card -> {
+            assertEquals(3, card.getValue(), 0);
+            assertTrue(AggregationInspectionHelper.hasValue(card));
+        }, collector -> { assertTrue(collector instanceof CardinalityAggregator.DirectCollector); }, fieldType);
+    }
+
+    public void testDirectExecutionHintWithNumericDocValues() throws IOException {
+        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("number", NumberFieldMapper.NumberType.LONG);
+        final
CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("number") + .executionHint("direct"); + testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new NumericDocValuesField("number", 7))); + iw.addDocument(singleton(new NumericDocValuesField("number", 8))); + iw.addDocument(singleton(new NumericDocValuesField("number", 9))); + }, card -> { + assertEquals(3, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, collector -> { assertTrue(collector instanceof CardinalityAggregator.DirectCollector); }, fieldType); + } + + public void testOrdinalsExecutionHintWithNumericDocValues() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("number", NumberFieldMapper.NumberType.LONG); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("number") + .executionHint("ordinals"); + testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new NumericDocValuesField("number", 7))); + iw.addDocument(singleton(new NumericDocValuesField("number", 8))); + iw.addDocument(singleton(new NumericDocValuesField("number", 9))); + }, card -> { + assertEquals(3, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, collector -> { assertTrue(collector instanceof CardinalityAggregator.DirectCollector); }, fieldType); + } + + public void testNoExecutionHintWithByteValues() throws IOException { + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("field"); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("field"); + + testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new SortedDocValuesField("field", new BytesRef()))); + }, card -> { + assertEquals(1, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, collector -> { assertTrue(collector instanceof CardinalityAggregator.OrdinalsCollector); }, fieldType); + } + + public void testDirectExecutionHintWithByteValues() throws IOException { + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("field"); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("field") + .executionHint("direct"); + testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new SortedDocValuesField("field", new BytesRef()))); + }, card -> { + assertEquals(1, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, collector -> { assertTrue(collector instanceof CardinalityAggregator.DirectCollector); }, fieldType); + } + + public void testOrdinalsExecutionHintWithByteValues() throws IOException { + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("field"); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("field") + .executionHint("ordinals"); + testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new SortedDocValuesField("field", new BytesRef()))); + }, card -> { + assertEquals(1, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, collector -> { assertTrue(collector instanceof CardinalityAggregator.OrdinalsCollector); }, fieldType); 
+ } } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 78e3d4f50a0d5..eba1769ad882d 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -1331,6 +1331,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { protected static class CountingAggregator extends Aggregator { private final AtomicInteger collectCounter; public final Aggregator delegate; + private LeafBucketCollector selectedCollector; public CountingAggregator(AtomicInteger collectCounter, Aggregator delegate) { this.collectCounter = collectCounter; @@ -1341,6 +1342,10 @@ public AtomicInteger getCollectCount() { return collectCounter; } + public LeafBucketCollector getSelectedCollector() { + return selectedCollector; + } + @Override public void close() { delegate.close(); @@ -1381,7 +1386,8 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOExce return new LeafBucketCollector() { @Override public void collect(int doc, long bucket) throws IOException { - delegate.getLeafCollector(ctx).collect(doc, bucket); + selectedCollector = delegate.getLeafCollector(ctx); + selectedCollector.collect(doc, bucket); collectCounter.incrementAndGet(); } }; From 1b7c0552c3f43461b91a77ff6e937b6f27705a51 Mon Sep 17 00:00:00 2001 From: Owais Kazi Date: Wed, 19 Feb 2025 17:42:09 -0800 Subject: [PATCH 05/48] Bump jetty version in hdfs-fixture to 9.4.57.v20241219 (#17395) Signed-off-by: Owais --- CHANGELOG-3.0.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 67fef874777e2..39b7c758d5ac7 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) - Bump Apache HttpCore5/HttpClient5 dependencies from 5.2.5/5.3.1 to 5.3.1/5.4.1 to support ExtendedSocketOption in HttpAsyncClient ([#16757](https://github.com/opensearch-project/OpenSearch/pull/16757)) +- Bumps `jetty` version from 9.4.55.v20240627 to 9.4.57.v20241219 ### Changed - Changed locale provider from COMPAT to CLDR ([#14345](https://github.com/opensearch-project/OpenSearch/pull/14345)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 88add6d359e54..b3311a71c3555 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.java' group = 'hdfs' versions << [ - 'jetty': '9.4.55.v20240627' + 'jetty': '9.4.57.v20241219' ] dependencies { From 10fa39d11dc65fdc412a075deaa06205371c664f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Feb 2025 07:36:07 -0500 Subject: [PATCH 06/48] Bump me.champeau.gradle.japicmp from 0.4.5 to 0.4.6 in /server (#17375) * Bump me.champeau.gradle.japicmp from 0.4.5 to 0.4.6 in /server Bumps me.champeau.gradle.japicmp from 0.4.5 to 0.4.6. --- updated-dependencies: - dependency-name: me.champeau.gradle.japicmp dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + server/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aa6e7bce8655d..03f2b019d514a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.ant:ant` from 1.10.14 to 1.10.15 ([#17288](https://github.com/opensearch-project/OpenSearch/pull/17288)) - Bump netty from 4.1.117.Final to 4.1.118.Final ([#17320](https://github.com/opensearch-project/OpenSearch/pull/17320)) - Bump `reactor_netty` from 1.1.26 to 1.1.27 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322)) +- Bump `me.champeau.gradle.japicmp` from 0.4.5 to 0.4.6 ([#17375](https://github.com/opensearch-project/OpenSearch/pull/17375)) ### Changed - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) diff --git a/server/build.gradle b/server/build.gradle index 74a9d1a59922d..e1512fb4b2c58 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -36,7 +36,7 @@ plugins { id('opensearch.publish') id('opensearch.internal-cluster-test') id('opensearch.optional-dependencies') - id('me.champeau.gradle.japicmp') version '0.4.5' + id('me.champeau.gradle.japicmp') version '0.4.6' } publishing { From bad652bd84ec5dca9ab9333efba2e9729babdd79 Mon Sep 17 00:00:00 2001 From: kkewwei Date: Thu, 20 Feb 2025 22:50:27 +0800 Subject: [PATCH 07/48] Fix Flaky Test testPendingTasksWithClusterNotRecoveredBlock (#17397) Signed-off-by: kkewwei Signed-off-by: kkewwei --- .../action/admin/cluster/tasks/PendingTasksBlocksIT.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java index 2be4acd16671f..337403fc734a4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java @@ -91,11 +91,11 @@ public void testPendingTasksWithClusterNotRecoveredBlock() throws Exception { } // restart the cluster but prevent it from performing state recovery - final int nodeCount = client().admin().cluster().prepareNodesInfo("data:true", "cluster_manager:true").get().getNodes().size(); + final int dataNodesCount = client().admin().cluster().prepareNodesInfo("data:true").get().getNodes().size(); internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { @Override public Settings onNodeStopped(String nodeName) { - return Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), nodeCount + 1).build(); + return Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), dataNodesCount + 1).build(); } @Override From f652abc00c233b5367dbc5a9b3621fc612418cb3 Mon Sep 17 00:00:00 2001 From: kkewwei Date: Fri, 21 Feb 2025 01:04:21 +0800 Subject: [PATCH 08/48] Fix Flaky Test ShuffleForcedMergePolicyTests.testDiagnostics (#17392) Signed-off-by: kkewwei Signed-off-by: kkewwei --- .../lucene/index/ShuffleForcedMergePolicyTests.java | 9 +++++++-- 1 file 
changed, 7 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java b/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java index 46e5d4a76cd9d..58fdb2c503b7d 100644 --- a/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java +++ b/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java @@ -43,6 +43,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.TieredMergePolicy; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.store.Directory; @@ -59,13 +60,17 @@ public class ShuffleForcedMergePolicyTests extends BaseMergePolicyTestCase { public void testDiagnostics() throws IOException { try (Directory dir = newDirectory()) { IndexWriterConfig iwc = newIndexWriterConfig().setMaxFullFlushMergeWaitMillis(0); - MergePolicy mp = new ShuffleForcedMergePolicy(newTieredMergePolicy()); + TieredMergePolicy tieredMergePolicy = newTieredMergePolicy(); + // ensure only trigger one Merge when flushing, and there are remaining segments to be force merged + tieredMergePolicy.setSegmentsPerTier(8); + tieredMergePolicy.setMaxMergeAtOnce(8); + MergePolicy mp = new ShuffleForcedMergePolicy(tieredMergePolicy); iwc.setMergePolicy(mp); boolean sorted = random().nextBoolean(); if (sorted) { iwc.setIndexSort(new Sort(new SortField("sort", SortField.Type.INT))); } - int numDocs = atLeast(100); + int numDocs = 90 + random().nextInt(10); try (IndexWriter writer = new IndexWriter(dir, iwc)) { for (int i = 0; i < numDocs; i++) { From 636dea48ec51aa86400ae2b0991f46bb2086d8a3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Feb 2025 13:35:13 -0500 Subject: [PATCH 09/48] Bump net.minidev:json-smart from 2.5.1 to 2.5.2 in /test/fixtures/hdfs-fixture (#17378) * Bump net.minidev:json-smart in /test/fixtures/hdfs-fixture Bumps [net.minidev:json-smart](https://github.com/netplex/json-smart-v2) from 2.5.1 to 2.5.2. - [Release notes](https://github.com/netplex/json-smart-v2/releases) - [Commits](https://github.com/netplex/json-smart-v2/compare/2.5.1...2.5.2) --- updated-dependencies: - dependency-name: net.minidev:json-smart dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 03f2b019d514a..ab4138c452894 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump netty from 4.1.117.Final to 4.1.118.Final ([#17320](https://github.com/opensearch-project/OpenSearch/pull/17320)) - Bump `reactor_netty` from 1.1.26 to 1.1.27 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322)) - Bump `me.champeau.gradle.japicmp` from 0.4.5 to 0.4.6 ([#17375](https://github.com/opensearch-project/OpenSearch/pull/17375)) +- Bump `net.minidev:json-smart` from 2.5.1 to 2.5.2 ([#17378](https://github.com/opensearch-project/OpenSearch/pull/17378)) ### Changed - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index b3311a71c3555..d69ddcbd1a07c 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -65,7 +65,7 @@ dependencies { api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - api 'net.minidev:json-smart:2.5.1' + api 'net.minidev:json-smart:2.5.2' api "org.mockito:mockito-core:${versions.mockito}" api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" From abe2333e5315ac6482d61477de22921895472d8f Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Thu, 20 Feb 2025 13:39:07 -0800 Subject: [PATCH 10/48] Arrow Flight Server bootstrap logic (#16962) * Arrow Flight Server bootstrap logic * new plugin for StreamManager implementation * integration with server module * support for SslContext in Flight server and client * ClientManager for creating a pool of flight clients for data nodes * custom event loop group and thread pool for server and client channel Signed-off-by: Rishabh Maurya * interim changes - integration with Auxiliary Transport Signed-off-by: Rishabh Maurya * changes to use grpc-netty-shaded Signed-off-by: Rishabh Maurya * Update javadoc Signed-off-by: Rishabh Maurya * fix the shaded dependencies Signed-off-by: Rishabh Maurya * Move arrow-flight-rpc from module to plugin Signed-off-by: Rishabh Maurya * remove unnecessary imports Signed-off-by: Rishabh Maurya * rebase fixes Signed-off-by: Rishabh Maurya * Fix permissions and other runtime issues Signed-off-by: Rishabh Maurya * Remove StreamManagerWrapper and Node.java changes from PR Signed-off-by: Rishabh Maurya * Fix permissions for test Signed-off-by: Rishabh Maurya * remove testGetFlightClientLocationExecutionError as thread interruption was causing client close failure Signed-off-by: Rishabh Maurya * Fix the issue with single node ClientManager Signed-off-by: Rishabh Maurya * Fix flight server integ test on unix machine Signed-off-by: Rishabh Maurya * suppress JSM removal warning Signed-off-by: Rishabh Maurya * Fix security policy and FlightClientManagerTests Signed-off-by: Rishabh Maurya * remove 
StreamManagerWrapper from the PR Signed-off-by: Rishabh Maurya * Set multi-release in manifest while shadowing arrow-memory-shaded Signed-off-by: Rishabh Maurya * Disable jacocoReport for shaded projects Signed-off-by: Rishabh Maurya * Remove multi version classes from arrow-memory-shaded Signed-off-by: Rishabh Maurya * Address the PR comments Signed-off-by: Rishabh Maurya * Move the arrow-memory-shaded and flight within flight-rpc plugin Signed-off-by: Rishabh Maurya * Move the arrow-memory-shaded and flight within flight-rpc plugin Signed-off-by: Rishabh Maurya * Detach SPI from Apache Arrow (not needed at the moment), drop all shaded libs (not needed at the moment) Signed-off-by: Andriy Redko * Rebase and other minor refactoring Signed-off-by: Rishabh Maurya * Address PR comments - majorly move away from grpc-netty-shaded Signed-off-by: Rishabh Maurya * remove arrow flight, client from codecov as the package is non opensearch is just for overriding purpose Signed-off-by: Rishabh Maurya * change compileOnly to implementation dep for arrow-spi Signed-off-by: Rishabh Maurya * Rebase from main and fixes related to netty version bump Signed-off-by: Rishabh Maurya * Simplify cloning and overriding logic for FlightServer and FlightClient Signed-off-by: Rishabh Maurya * Only clone FlightClient::Builder class Signed-off-by: Andriy Redko * Only clone FlightServer::Builder class Signed-off-by: Andriy Redko * Update min supported version to 3.0.0 Co-authored-by: Andriy Redko Signed-off-by: Rishabh Maurya * Fix java security permission issue Signed-off-by: Rishabh Maurya * Address PR comments Signed-off-by: Rishabh Maurya * Fix netty system properties Signed-off-by: Rishabh Maurya * Move flight service and other components of flight-rpc-plugin behind feature flag Signed-off-by: Rishabh Maurya * remove system property value set numDirectArenas Signed-off-by: Rishabh Maurya --------- Signed-off-by: Rishabh Maurya Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko Co-authored-by: Andriy Redko Co-authored-by: Andriy Redko --- CHANGELOG-3.0.md | 1 + codecov.yml | 1 + gradle/libs.versions.toml | 2 +- libs/arrow-spi/build.gradle | 70 +- .../licenses/arrow-format-17.0.0.jar.sha1 | 1 - .../arrow-memory-core-17.0.0.jar.sha1 | 1 - .../arrow-memory-netty-17.0.0.jar.sha1 | 1 - ...-memory-netty-buffer-patch-17.0.0.jar.sha1 | 1 - .../licenses/arrow-vector-17.0.0.jar.sha1 | 1 - .../licenses/jackson-databind-LICENSE.txt | 8 - .../licenses/jackson-databind-NOTICE.txt | 20 - .../licenses/netty-common-NOTICE.txt | 264 -- .../opensearch/arrow/spi/StreamManager.java | 4 +- .../opensearch/arrow/spi/StreamProducer.java | 12 +- .../opensearch/arrow/spi/StreamReader.java | 5 +- .../opensearch/arrow/spi/package-info.java | 2 +- plugins/arrow-flight-rpc/build.gradle | 301 +++ .../licenses/arrow-format-18.1.0.jar.sha1 | 1 + .../licenses/arrow-format-LICENSE.txt | 0 .../licenses/arrow-format-NOTICE.txt | 0 .../arrow-memory-core-18.1.0.jar.sha1 | 1 + .../licenses/arrow-memory-core-LICENSE.txt | 0 .../licenses/arrow-memory-core-NOTICE.txt | 0 .../arrow-memory-netty-18.1.0.jar.sha1 | 1 + .../licenses/arrow-memory-netty-LICENSE.txt | 0 .../licenses/arrow-memory-netty-NOTICE.txt | 0 ...-memory-netty-buffer-patch-18.1.0.jar.sha1 | 1 + ...rrow-memory-netty-buffer-patch-LICENSE.txt | 0 ...arrow-memory-netty-buffer-patch-NOTICE.txt | 0 .../licenses/arrow-vector-18.1.0.jar.sha1 | 1 + .../licenses/arrow-vector-LICENSE.txt | 0 .../licenses/arrow-vector-NOTICE.txt | 0 .../licenses/commons-codec-1.16.1.jar.sha1 | 0 
.../licenses/commons-codec-LICENSE.txt | 0 .../licenses/commons-codec-NOTICE.txt | 0 .../licenses/failureaccess-1.0.1.jar.sha1 | 1 + .../licenses/failureaccess-LICENSE.txt | 202 ++ .../licenses/failureaccess-NOTICE.txt | 0 .../licenses/flatbuffers-java-2.0.0.jar.sha1 | 0 .../licenses/flatbuffers-java-LICENSE.txt | 0 .../licenses/flatbuffers-java-NOTICE.txt | 0 .../licenses/flight-core-18.1.0.jar.sha1 | 1 + .../licenses/flight-core-LICENSE.txt | 2261 +++++++++++++++++ .../licenses/flight-core-NOTICE.txt | 84 + .../licenses/grpc-LICENSE.txt | 4 +- .../arrow-flight-rpc/licenses/grpc-NOTICE.txt | 62 + .../licenses/grpc-api-1.68.2.jar.sha1 | 1 + .../licenses/grpc-core-1.68.2.jar.sha1 | 1 + .../licenses/grpc-netty-1.68.2.jar.sha1 | 1 + .../licenses/grpc-protobuf-1.68.2.jar.sha1 | 1 + .../grpc-protobuf-lite-1.68.2.jar.sha1 | 1 + .../licenses/grpc-stub-1.68.2.jar.sha1 | 1 + .../licenses/guava-33.3.1-jre.jar.sha1 | 1 + .../licenses/guava-LICENSE.txt | 202 ++ .../licenses/guava-NOTICE.txt | 0 .../arrow-flight-rpc/licenses/jackson-LICENSE | 0 .../arrow-flight-rpc/licenses/jackson-NOTICE | 0 .../jackson-annotations-2.18.2.jar.sha1 | 0 .../licenses/jackson-databind-2.18.2.jar.sha1 | 0 .../licenses/jsr305-3.0.2.jar.sha1 | 1 + .../licenses/jsr305-LICENSE.txt | 29 + .../licenses/jsr305-NOTICE.txt | 1 + .../licenses/netty-LICENSE.txt | 0 .../licenses/netty-NOTICE.txt | 0 .../netty-buffer-4.1.118.Final.jar.sha1 | 0 .../netty-codec-4.1.118.Final.jar.sha1 | 1 + .../netty-codec-http-4.1.118.Final.jar.sha1 | 1 + .../netty-codec-http2-4.1.118.Final.jar.sha1 | 1 + .../netty-common-4.1.118.Final.jar.sha1 | 0 .../netty-handler-4.1.118.Final.jar.sha1 | 1 + .../netty-resolver-4.1.118.Final.jar.sha1 | 1 + ...tty-tcnative-classes-2.0.66.Final.jar.sha1 | 1 + .../netty-transport-4.1.118.Final.jar.sha1 | 1 + ...sport-classes-epoll-4.1.118.Final.jar.sha1 | 1 + ...-native-unix-common-4.1.118.Final.jar.sha1 | 1 + .../licenses/parquet-arrow-1.13.1.jar.sha1 | 1 + .../licenses/parquet-arrow-LICENSE.txt | 218 ++ .../licenses/parquet-arrow-NOTICE.txt | 94 + .../licenses/perfmark-api-0.27.0.jar.sha1 | 1 + .../licenses/perfmark-api-LICENSE.txt | 201 ++ .../licenses/perfmark-api-NOTICE.txt | 41 + .../licenses/slf4j-api-1.7.36.jar.sha1 | 0 .../licenses/slf4j-api-LICENSE.txt | 0 .../licenses/slf4j-api-NOTICE.txt | 0 .../arrow/flight/ArrowFlightServerIT.java | 59 + .../apache/arrow/flight/OSFlightClient.java | 250 ++ .../apache/arrow/flight/OSFlightServer.java | 478 ++++ .../org/apache/arrow/flight/package-info.java | 13 + .../flight/api/FlightServerInfoAction.java | 65 + .../arrow/flight/api/NodeFlightInfo.java | 99 + .../flight/api/NodesFlightInfoAction.java | 29 + .../flight/api/NodesFlightInfoRequest.java | 73 + .../flight/api/NodesFlightInfoResponse.java | 111 + .../api/TransportNodesFlightInfoAction.java | 113 + .../arrow/flight/api/package-info.java | 12 + .../flight/bootstrap/FlightClientManager.java | 252 ++ .../arrow/flight/bootstrap/FlightService.java | 170 ++ .../flight/bootstrap/FlightStreamPlugin.java | 264 ++ .../flight/bootstrap/ServerComponents.java | 286 +++ .../arrow/flight/bootstrap/ServerConfig.java | 218 ++ .../arrow/flight/bootstrap/package-info.java | 12 + .../tls/DefaultSslContextProvider.java | 104 + .../bootstrap/tls/SslContextProvider.java | 35 + .../flight/bootstrap/tls/package-info.java | 12 + .../opensearch/arrow/flight/package-info.java | 12 + .../plugin-metadata/plugin-security.policy | 45 + .../arrow/flight/FlightStreamPluginTests.java | 104 + .../api/FlightServerInfoActionTests.java | 101 + 
.../arrow/flight/api/NodeFlightInfoTests.java | 160 ++ .../api/NodesFlightInfoRequestTests.java | 39 + .../api/NodesFlightInfoResponseTests.java | 241 ++ .../TransportNodesFlightInfoActionTests.java | 176 ++ .../bootstrap/FlightClientManagerTests.java | 384 +++ .../flight/bootstrap/FlightServiceTests.java | 160 ++ .../flight/bootstrap/ServerConfigTests.java | 80 + server/build.gradle | 1 + .../common/settings/FeatureFlagSettings.java | 3 +- .../opensearch/common/util/FeatureFlags.java | 6 +- .../DefaultSecureTransportParameters.java | 37 + .../SecureTransportSettingsProvider.java | 14 + .../plugins/StreamManagerPlugin.java | 27 + .../opensearch/test/OpenSearchTestCase.java | 8 +- 122 files changed, 7987 insertions(+), 386 deletions(-) delete mode 100644 libs/arrow-spi/licenses/arrow-format-17.0.0.jar.sha1 delete mode 100644 libs/arrow-spi/licenses/arrow-memory-core-17.0.0.jar.sha1 delete mode 100644 libs/arrow-spi/licenses/arrow-memory-netty-17.0.0.jar.sha1 delete mode 100644 libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-17.0.0.jar.sha1 delete mode 100644 libs/arrow-spi/licenses/arrow-vector-17.0.0.jar.sha1 delete mode 100644 libs/arrow-spi/licenses/jackson-databind-LICENSE.txt delete mode 100644 libs/arrow-spi/licenses/jackson-databind-NOTICE.txt delete mode 100644 libs/arrow-spi/licenses/netty-common-NOTICE.txt create mode 100644 plugins/arrow-flight-rpc/build.gradle create mode 100644 plugins/arrow-flight-rpc/licenses/arrow-format-18.1.0.jar.sha1 rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-format-LICENSE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-format-NOTICE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/arrow-memory-core-18.1.0.jar.sha1 rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-memory-core-LICENSE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-memory-core-NOTICE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/arrow-memory-netty-18.1.0.jar.sha1 rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-memory-netty-LICENSE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-memory-netty-NOTICE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-18.1.0.jar.sha1 rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-memory-netty-buffer-patch-LICENSE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-memory-netty-buffer-patch-NOTICE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/arrow-vector-18.1.0.jar.sha1 rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-vector-LICENSE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-vector-NOTICE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/commons-codec-1.16.1.jar.sha1 (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/commons-codec-LICENSE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/commons-codec-NOTICE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/failureaccess-1.0.1.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/failureaccess-LICENSE.txt rename libs/arrow-spi/licenses/flatbuffers-java-NOTICE.txt => plugins/arrow-flight-rpc/licenses/failureaccess-NOTICE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/flatbuffers-java-2.0.0.jar.sha1 (100%) rename {libs/arrow-spi => 
plugins/arrow-flight-rpc}/licenses/flatbuffers-java-LICENSE.txt (100%) rename libs/arrow-spi/licenses/slf4j-api-NOTICE.txt => plugins/arrow-flight-rpc/licenses/flatbuffers-java-NOTICE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/flight-core-18.1.0.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/flight-core-LICENSE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/flight-core-NOTICE.txt rename libs/arrow-spi/licenses/netty-common-LICENSE.txt => plugins/arrow-flight-rpc/licenses/grpc-LICENSE.txt (99%) create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-NOTICE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-api-1.68.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-core-1.68.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-netty-1.68.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-protobuf-1.68.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-stub-1.68.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/guava-33.3.1-jre.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/guava-LICENSE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/guava-NOTICE.txt rename libs/arrow-spi/licenses/jackson-annotations-LICENSE.txt => plugins/arrow-flight-rpc/licenses/jackson-LICENSE (100%) rename libs/arrow-spi/licenses/jackson-annotations-NOTICE.txt => plugins/arrow-flight-rpc/licenses/jackson-NOTICE (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/jackson-annotations-2.18.2.jar.sha1 (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/jackson-databind-2.18.2.jar.sha1 (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/jsr305-3.0.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/jsr305-LICENSE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/jsr305-NOTICE.txt rename libs/arrow-spi/licenses/netty-buffer-LICENSE.txt => plugins/arrow-flight-rpc/licenses/netty-LICENSE.txt (100%) rename libs/arrow-spi/licenses/netty-buffer-NOTICE.txt => plugins/arrow-flight-rpc/licenses/netty-NOTICE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/netty-buffer-4.1.118.Final.jar.sha1 (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/netty-codec-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-codec-http-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-codec-http2-4.1.118.Final.jar.sha1 rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/netty-common-4.1.118.Final.jar.sha1 (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/netty-handler-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-resolver-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-tcnative-classes-2.0.66.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-transport-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-transport-classes-epoll-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-transport-native-unix-common-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/parquet-arrow-1.13.1.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/parquet-arrow-LICENSE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/parquet-arrow-NOTICE.txt create mode 100644 
plugins/arrow-flight-rpc/licenses/perfmark-api-0.27.0.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/perfmark-api-LICENSE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/perfmark-api-NOTICE.txt rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/slf4j-api-1.7.36.jar.sha1 (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/slf4j-api-LICENSE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/slf4j-api-NOTICE.txt create mode 100644 plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightClient.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightServer.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/package-info.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/FlightServerInfoAction.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodeFlightInfo.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoAction.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequest.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponse.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoAction.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/package-info.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightClientManager.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightStreamPlugin.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerComponents.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerConfig.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/package-info.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/DefaultSslContextProvider.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/SslContextProvider.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/package-info.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/package-info.java create mode 100644 plugins/arrow-flight-rpc/src/main/plugin-metadata/plugin-security.policy create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/FlightServerInfoActionTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodeFlightInfoTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequestTests.java create mode 100644 
plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponseTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoActionTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/ServerConfigTests.java create mode 100644 server/src/main/java/org/opensearch/plugins/StreamManagerPlugin.java diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 39b7c758d5ac7..fc2fcd361f497 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Added pull-based Ingestion (APIs, for ingestion source, a Kafka plugin, and IngestionEngine that pulls data from the ingestion source) ([#16958](https://github.com/opensearch-project/OpenSearch/pull/16958)) - Added ConfigurationUtils to core for the ease of configuration parsing [#17223](https://github.com/opensearch-project/OpenSearch/pull/17223) - Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) +- Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/codecov.yml b/codecov.yml index dac8f30956846..e22af90bcdbe1 100644 --- a/codecov.yml +++ b/codecov.yml @@ -4,6 +4,7 @@ codecov: ignore: - "test" - "benchmarks" + - "plugins/arrow-flight-rpc/**/org/apache/arrow/flight/**" coverage: precision: 2 diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 8cd210bbcb65a..abdd87394b35c 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -82,7 +82,7 @@ opentelemetry = "1.46.0" opentelemetrysemconv = "1.29.0-alpha" # arrow dependencies -arrow = "17.0.0" +arrow = "18.1.0" flatbuffers = "2.0.0" [libraries] diff --git a/libs/arrow-spi/build.gradle b/libs/arrow-spi/build.gradle index d14b7e88cfb8c..90a4c162e428b 100644 --- a/libs/arrow-spi/build.gradle +++ b/libs/arrow-spi/build.gradle @@ -10,79 +10,11 @@ */ testingConventions.enabled = false + dependencies { api project(':libs:opensearch-core') - api "org.apache.arrow:arrow-vector:${versions.arrow}" - api "org.apache.arrow:arrow-format:${versions.arrow}" - api "org.apache.arrow:arrow-memory-core:${versions.arrow}" - runtimeOnly "org.apache.arrow:arrow-memory-netty-buffer-patch:${versions.arrow}" - runtimeOnly "org.apache.arrow:arrow-memory-netty:${versions.arrow}" - runtimeOnly "io.netty:netty-buffer:${versions.netty}" - runtimeOnly "io.netty:netty-common:${versions.netty}" - - runtimeOnly "com.google.flatbuffers:flatbuffers-java:${versions.flatbuffers}" - runtimeOnly "org.slf4j:slf4j-api:${versions.slf4j}" - runtimeOnly "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" - - implementation "commons-codec:commons-codec:${versions.commonscodec}" } tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' } - -tasks.named('thirdPartyAudit').configure { - 
ignoreMissingClasses( - // Logging frameworks - 'org.apache.commons.logging.Log', - 'org.apache.commons.logging.LogFactory', - 'org.apache.log4j.Level', - 'org.apache.log4j.Logger', - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', - - // Reactor BlockHound - 'reactor.blockhound.BlockHound$Builder', - 'reactor.blockhound.integration.BlockHoundIntegration' - ) - - ignoreViolations( - "io.netty.util.internal.PlatformDependent0", - "io.netty.util.internal.PlatformDependent0\$1", - "io.netty.util.internal.PlatformDependent0\$2", - "io.netty.util.internal.PlatformDependent0\$3", - "io.netty.util.internal.PlatformDependent0\$4", - "io.netty.util.internal.PlatformDependent0\$6", - "io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef", - "io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef", - "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields", - "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields", - "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields", - "io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode", - "io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField", - "io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField", - "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField", - "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField", - "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField", - "io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess", - "io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess", - "io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess", - "io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueConsumerIndexField", - "io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerIndexField", - "io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerLimitField", - "org.apache.arrow.memory.ArrowBuf", - "org.apache.arrow.memory.util.ByteFunctionHelpers", - "org.apache.arrow.memory.util.MemoryUtil", - "org.apache.arrow.memory.util.MemoryUtil\$1", - "org.apache.arrow.memory.util.hash.MurmurHasher", - "org.apache.arrow.memory.util.hash.SimpleHasher", - "org.apache.arrow.vector.BaseFixedWidthVector", - "org.apache.arrow.vector.BitVectorHelper", - "org.apache.arrow.vector.Decimal256Vector", - "org.apache.arrow.vector.DecimalVector", - "org.apache.arrow.vector.util.DecimalUtility", - "org.apache.arrow.vector.util.VectorAppender" - ) -} diff --git a/libs/arrow-spi/licenses/arrow-format-17.0.0.jar.sha1 b/libs/arrow-spi/licenses/arrow-format-17.0.0.jar.sha1 deleted file mode 100644 index 34fd4704eac91..0000000000000 --- a/libs/arrow-spi/licenses/arrow-format-17.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5d052f20fd1193840eb59818515e710156c364b2 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-memory-core-17.0.0.jar.sha1 b/libs/arrow-spi/licenses/arrow-memory-core-17.0.0.jar.sha1 deleted file mode 100644 index ea312f4f5e51a..0000000000000 --- a/libs/arrow-spi/licenses/arrow-memory-core-17.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51c5287ef5a624656bb38da7684078905b1a88c9 \ No newline at end of file diff --git 
a/libs/arrow-spi/licenses/arrow-memory-netty-17.0.0.jar.sha1 b/libs/arrow-spi/licenses/arrow-memory-netty-17.0.0.jar.sha1 deleted file mode 100644 index f77b3d836b77b..0000000000000 --- a/libs/arrow-spi/licenses/arrow-memory-netty-17.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de65a34dfeada4d47b161871fa39fa0a2ab4c39c \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-17.0.0.jar.sha1 b/libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-17.0.0.jar.sha1 deleted file mode 100644 index b21b4e8cc7d23..0000000000000 --- a/libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-17.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cdfdaa1bd5135bd869515fc205392ba92dcc1509 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-vector-17.0.0.jar.sha1 b/libs/arrow-spi/licenses/arrow-vector-17.0.0.jar.sha1 deleted file mode 100644 index 8f9fddc882396..0000000000000 --- a/libs/arrow-spi/licenses/arrow-vector-17.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16685545e4734382c1fcdaf12ac9b0a7d1fc06c0 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/jackson-databind-LICENSE.txt b/libs/arrow-spi/licenses/jackson-databind-LICENSE.txt deleted file mode 100644 index f5f45d26a49d6..0000000000000 --- a/libs/arrow-spi/licenses/jackson-databind-LICENSE.txt +++ /dev/null @@ -1,8 +0,0 @@ -This copy of Jackson JSON processor streaming parser/generator is licensed under the -Apache (Software) License, version 2.0 ("the License"). -See the License for details about distribution rights, and the -specific rights regarding derivate works. - -You may obtain a copy of the License at: - -http://www.apache.org/licenses/LICENSE-2.0 diff --git a/libs/arrow-spi/licenses/jackson-databind-NOTICE.txt b/libs/arrow-spi/licenses/jackson-databind-NOTICE.txt deleted file mode 100644 index 4c976b7b4cc58..0000000000000 --- a/libs/arrow-spi/licenses/jackson-databind-NOTICE.txt +++ /dev/null @@ -1,20 +0,0 @@ -# Jackson JSON processor - -Jackson is a high-performance, Free/Open Source JSON processing library. -It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has -been in development since 2007. -It is currently developed by a community of developers, as well as supported -commercially by FasterXML.com. - -## Licensing - -Jackson core and extension components may licensed under different licenses. -To find the details that apply to this artifact see the accompanying LICENSE file. -For more information, including possible other licensing options, contact -FasterXML.com (http://fasterxml.com). - -## Credits - -A list of contributors may be found from CREDITS file, which is included -in some artifacts (usually source distributions); but is always available -from the source code management (SCM) system project uses. diff --git a/libs/arrow-spi/licenses/netty-common-NOTICE.txt b/libs/arrow-spi/licenses/netty-common-NOTICE.txt deleted file mode 100644 index 971865b7c1c23..0000000000000 --- a/libs/arrow-spi/licenses/netty-common-NOTICE.txt +++ /dev/null @@ -1,264 +0,0 @@ - - The Netty Project - ================= - -Please visit the Netty web site for more information: - - * https://netty.io/ - -Copyright 2014 The Netty Project - -The Netty Project licenses this file to you under the Apache License, -version 2.0 (the "License"); you may not use this file except in compliance -with the License. 
You may obtain a copy of the License at: - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -License for the specific language governing permissions and limitations -under the License. - -Also, please refer to each LICENSE..txt file, which is located in -the 'license' directory of the distribution file, for the license terms of the -components that this product depends on. - -------------------------------------------------------------------------------- -This product contains the extensions to Java Collections Framework which has -been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: - - * LICENSE: - * license/LICENSE.jsr166y.txt (Public Domain) - * HOMEPAGE: - * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ - * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ - -This product contains a modified version of Robert Harder's Public Domain -Base64 Encoder and Decoder, which can be obtained at: - - * LICENSE: - * license/LICENSE.base64.txt (Public Domain) - * HOMEPAGE: - * http://iharder.sourceforge.net/current/java/base64/ - -This product contains a modified portion of 'Webbit', an event based -WebSocket and HTTP server, which can be obtained at: - - * LICENSE: - * license/LICENSE.webbit.txt (BSD License) - * HOMEPAGE: - * https://github.com/joewalnes/webbit - -This product contains a modified portion of 'SLF4J', a simple logging -facade for Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.slf4j.txt (MIT License) - * HOMEPAGE: - * https://www.slf4j.org/ - -This product contains a modified portion of 'Apache Harmony', an open source -Java SE, which can be obtained at: - - * NOTICE: - * license/NOTICE.harmony.txt - * LICENSE: - * license/LICENSE.harmony.txt (Apache License 2.0) - * HOMEPAGE: - * https://archive.apache.org/dist/harmony/ - -This product contains a modified portion of 'jbzip2', a Java bzip2 compression -and decompression library written by Matthew J. Francis. It can be obtained at: - - * LICENSE: - * license/LICENSE.jbzip2.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jbzip2/ - -This product contains a modified portion of 'libdivsufsort', a C API library to construct -the suffix array and the Burrows-Wheeler transformed string for any input string of -a constant-size alphabet written by Yuta Mori. It can be obtained at: - - * LICENSE: - * license/LICENSE.libdivsufsort.txt (MIT License) - * HOMEPAGE: - * https://github.com/y-256/libdivsufsort - -This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, - which can be obtained at: - - * LICENSE: - * license/LICENSE.jctools.txt (ASL2 License) - * HOMEPAGE: - * https://github.com/JCTools/JCTools - -This product optionally depends on 'JZlib', a re-implementation of zlib in -pure Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.jzlib.txt (BSD style License) - * HOMEPAGE: - * http://www.jcraft.com/jzlib/ - -This product optionally depends on 'Compress-LZF', a Java library for encoding and -decoding data in LZF format, written by Tatu Saloranta. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.compress-lzf.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/ning/compress - -This product optionally depends on 'lz4', a LZ4 Java compression -and decompression library written by Adrien Grand. It can be obtained at: - - * LICENSE: - * license/LICENSE.lz4.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jpountz/lz4-java - -This product optionally depends on 'lzma-java', a LZMA Java compression -and decompression library, which can be obtained at: - - * LICENSE: - * license/LICENSE.lzma-java.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jponge/lzma-java - -This product optionally depends on 'zstd-jni', a zstd-jni Java compression -and decompression library, which can be obtained at: - - * LICENSE: - * license/LICENSE.zstd-jni.txt (BSD) - * HOMEPAGE: - * https://github.com/luben/zstd-jni - -This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression -and decompression library written by William Kinney. It can be obtained at: - - * LICENSE: - * license/LICENSE.jfastlz.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jfastlz/ - -This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data -interchange format, which can be obtained at: - - * LICENSE: - * license/LICENSE.protobuf.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/protobuf - -This product optionally depends on 'Bouncy Castle Crypto APIs' to generate -a temporary self-signed X.509 certificate when the JVM does not provide the -equivalent functionality. It can be obtained at: - - * LICENSE: - * license/LICENSE.bouncycastle.txt (MIT License) - * HOMEPAGE: - * https://www.bouncycastle.org/ - -This product optionally depends on 'Snappy', a compression library produced -by Google Inc, which can be obtained at: - - * LICENSE: - * license/LICENSE.snappy.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/snappy - -This product optionally depends on 'JBoss Marshalling', an alternative Java -serialization API, which can be obtained at: - - * LICENSE: - * license/LICENSE.jboss-marshalling.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jboss-remoting/jboss-marshalling - -This product optionally depends on 'Caliper', Google's micro- -benchmarking framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.caliper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/google/caliper - -This product optionally depends on 'Apache Commons Logging', a logging -framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-logging.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/logging/ - -This product optionally depends on 'Apache Log4J', a logging framework, which -can be obtained at: - - * LICENSE: - * license/LICENSE.log4j.txt (Apache License 2.0) - * HOMEPAGE: - * https://logging.apache.org/log4j/ - -This product optionally depends on 'Aalto XML', an ultra-high performance -non-blocking XML processor, which can be obtained at: - - * LICENSE: - * license/LICENSE.aalto-xml.txt (Apache License 2.0) - * HOMEPAGE: - * https://wiki.fasterxml.com/AaltoHome - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Twitter. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.hpack.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/twitter/hpack - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at: - - * LICENSE: - * license/LICENSE.hyper-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/python-hyper/hpack/ - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: - - * LICENSE: - * license/LICENSE.nghttp2-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/nghttp2/nghttp2/ - -This product contains a modified portion of 'Apache Commons Lang', a Java library -provides utilities for the java.lang API, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-lang.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/proper/commons-lang/ - - -This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. - - * LICENSE: - * license/LICENSE.mvn-wrapper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/takari/maven-wrapper - -This product contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. -This private header is also used by Apple's open source - mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). - - * LICENSE: - * license/LICENSE.dnsinfo.txt (Apple Public Source License 2.0) - * HOMEPAGE: - * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h - -This product optionally depends on 'Brotli4j', Brotli compression and -decompression for Java., which can be obtained at: - - * LICENSE: - * license/LICENSE.brotli4j.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/hyperxpro/Brotli4j diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamManager.java b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamManager.java index cdb83f032356a..3bee05f0110d1 100644 --- a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamManager.java +++ b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamManager.java @@ -34,7 +34,7 @@ public interface StreamManager extends AutoCloseable { * @return A StreamTicket that can be used to access the stream * @throws IllegalArgumentException if producer is null or parentTaskId is invalid */ - StreamTicket registerStream(StreamProducer producer, TaskId parentTaskId); + StreamTicket registerStream(StreamProducer producer, TaskId parentTaskId); /** * Creates a stream reader for consuming Arrow data using a valid ticket. @@ -46,7 +46,7 @@ public interface StreamManager extends AutoCloseable { * @throws IllegalArgumentException if the ticket is invalid * @throws IllegalStateException if the stream has been cancelled or closed */ - StreamReader getStreamReader(StreamTicket ticket); + StreamReader getStreamReader(StreamTicket ticket); /** * Gets the StreamTicketFactory instance associated with this StreamManager. 
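Note on the two interface diffs that follow: StreamProducer and StreamReader lose their compile-time Arrow dependency by becoming generic over the vector-root and allocator types; a concrete implementation (for example, in the arrow-flight-rpc plugin) binds those parameters back to Arrow. A rough plugin-side sketch, assuming Arrow's VectorSchemaRoot and BufferAllocator; the class name and body are illustrative, not part of this change, and only the signature visible in the diff is shown:

    import org.apache.arrow.memory.BufferAllocator;
    import org.apache.arrow.vector.VectorSchemaRoot;
    import org.opensearch.arrow.spi.StreamProducer;

    // Binds the now-generic SPI back to concrete Arrow types on the implementation side.
    class ArrowStreamProducer implements StreamProducer<VectorSchemaRoot, BufferAllocator> {
        @Override
        public VectorSchemaRoot createRoot(BufferAllocator allocator) {
            // build the stream's schema and vectors from the given allocator
            throw new UnsupportedOperationException("sketch only");
        }
        // createJob(Allocator), the row-count estimate, the cancellation hook and
        // close() are elided here; see the interface diff below for the full contract.
    }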
diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamProducer.java b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamProducer.java
index c5cd6f16adfdd..6ca5b8944319b 100644
--- a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamProducer.java
+++ b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamProducer.java
@@ -8,8 +8,6 @@
 
 package org.opensearch.arrow.spi;
 
-import org.apache.arrow.memory.BufferAllocator;
-import org.apache.arrow.vector.VectorSchemaRoot;
 import org.opensearch.common.annotation.ExperimentalApi;
 import org.opensearch.core.tasks.TaskId;
 
@@ -77,7 +75,7 @@
  * @see StreamReader
  */
 @ExperimentalApi
-public interface StreamProducer extends Closeable {
+public interface StreamProducer<VectorRoot, Allocator> extends Closeable {
 
     /**
      * Creates a VectorSchemaRoot that defines the schema for this stream. This schema will be used
@@ -86,7 +84,7 @@ public interface StreamProducer extends Closeable {
      * @param allocator The allocator to use for creating vectors
      * @return A new VectorSchemaRoot instance
      */
-    VectorSchemaRoot createRoot(BufferAllocator allocator);
+    VectorRoot createRoot(Allocator allocator);
 
     /**
      * Creates a job that will produce the stream data in batches. The job will populate
@@ -95,7 +93,7 @@ public interface StreamProducer extends Closeable {
      * @param allocator The allocator to use for any additional memory allocations
      * @return A new BatchedJob instance
      */
-    BatchedJob createJob(BufferAllocator allocator);
+    BatchedJob<VectorRoot> createJob(Allocator allocator);
 
     /**
      * Provides an estimate of the total number of rows that will be produced.
@@ -113,7 +111,7 @@ public interface StreamProducer extends Closeable {
     /**
      * BatchedJob interface for producing stream data in batches.
      */
-    interface BatchedJob {
+    interface BatchedJob<VectorRoot> {
 
         /**
          * Executes the batch processing job. Implementations should populate the root with data
@@ -122,7 +120,7 @@ interface BatchedJob {
          * @param root The VectorSchemaRoot to populate with data
          * @param flushSignal Signal to coordinate with consumers
          */
-        void run(VectorSchemaRoot root, FlushSignal flushSignal);
+        void run(VectorRoot root, FlushSignal flushSignal);
 
         /**
          * Called to signal producer when the job is canceled.
diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamReader.java b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamReader.java
index b258652988b96..74ad3875238a9 100644
--- a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamReader.java
+++ b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamReader.java
@@ -8,7 +8,6 @@
 
 package org.opensearch.arrow.spi;
 
-import org.apache.arrow.vector.VectorSchemaRoot;
 import org.opensearch.common.annotation.ExperimentalApi;
 
 import java.io.Closeable;
 
@@ -37,7 +36,7 @@
  * @see StreamProducer
 */
 @ExperimentalApi
-public interface StreamReader extends Closeable {
+public interface StreamReader<VectorRoot> extends Closeable {
 
     /**
      * Blocking request to load next batch into root.
@@ -52,5 +51,5 @@ public interface StreamReader extends Closeable { * * @return the VectorSchemaRoot */ - VectorSchemaRoot getRoot(); + VectorRoot getRoot(); } diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/package-info.java b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/package-info.java index d075ecaa764bb..14227d69da8b0 100644 --- a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/package-info.java +++ b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/package-info.java @@ -7,6 +7,6 @@ */ /** - * Contains Apache Arrow related classes and Stream generic interfaces + * Contains Stream producer, consumer and manager generic interfaces */ package org.opensearch.arrow.spi; diff --git a/plugins/arrow-flight-rpc/build.gradle b/plugins/arrow-flight-rpc/build.gradle new file mode 100644 index 0000000000000..f3a166bc39ae7 --- /dev/null +++ b/plugins/arrow-flight-rpc/build.gradle @@ -0,0 +1,301 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +apply plugin: 'opensearch.internal-cluster-test' + +opensearchplugin { + description = 'Arrow flight based Stream implementation' + classname = 'org.opensearch.arrow.flight.bootstrap.FlightStreamPlugin' +} + +dependencies { + implementation project(':libs:opensearch-arrow-spi') + compileOnly 'org.checkerframework:checker-qual:3.44.0' + + implementation "org.apache.arrow:arrow-vector:${versions.arrow}" + implementation "org.apache.arrow:arrow-format:${versions.arrow}" + implementation "org.apache.arrow:flight-core:${versions.arrow}" + implementation "org.apache.arrow:arrow-memory-core:${versions.arrow}" + + runtimeOnly "org.apache.arrow:arrow-memory-netty:${versions.arrow}" + runtimeOnly "org.apache.arrow:arrow-memory-netty-buffer-patch:${versions.arrow}" + + implementation "io.netty:netty-buffer:${versions.netty}" + implementation "io.netty:netty-common:${versions.netty}" + + implementation "io.netty:netty-codec:${versions.netty}" + implementation "io.netty:netty-codec-http:${versions.netty}" + implementation "io.netty:netty-codec-http2:${versions.netty}" + implementation "io.netty:netty-handler:${versions.netty}" + implementation "io.netty:netty-resolver:${versions.netty}" + implementation "io.netty:netty-transport:${versions.netty}" + implementation "io.netty:netty-transport-native-unix-common:${versions.netty}" + implementation "io.netty:netty-transport-classes-epoll:${versions.netty}" + implementation "io.netty:netty-tcnative-classes:2.0.66.Final" + + implementation "org.slf4j:slf4j-api:${versions.slf4j}" + runtimeOnly "com.google.flatbuffers:flatbuffers-java:${versions.flatbuffers}" + runtimeOnly "commons-codec:commons-codec:${versions.commonscodec}" + + implementation "io.grpc:grpc-api:${versions.grpc}" + runtimeOnly "io.grpc:grpc-core:${versions.grpc}" + implementation "io.grpc:grpc-stub:${versions.grpc}" + implementation "io.grpc:grpc-netty:${versions.grpc}" + + runtimeOnly group: 'com.google.code.findbugs', name: 'jsr305', version: '3.0.2' + compileOnly 'org.immutables:value:2.10.1' + annotationProcessor 'org.immutables:value:2.10.1' + + runtimeOnly 'io.perfmark:perfmark-api:0.27.0' + runtimeOnly 'org.apache.parquet:parquet-arrow:1.13.1' + runtimeOnly "io.grpc:grpc-protobuf-lite:${versions.grpc}" + runtimeOnly "io.grpc:grpc-protobuf:${versions.grpc}" + 
implementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + + runtimeOnly "com.google.guava:failureaccess:1.0.1" + compileOnly "com.google.errorprone:error_prone_annotations:2.31.0" + runtimeOnly('com.google.guava:guava:33.3.1-jre') { + attributes { + attribute(Attribute.of('org.gradle.jvm.environment', String), 'standard-jvm') + } + } +} + +tasks.named('test').configure { + jacoco { + excludes = ['org/apache/arrow/flight/**'] + } +} + +test { + systemProperty 'io.netty.allocator.numDirectArenas', '1' + systemProperty 'io.netty.noUnsafe', 'false' + systemProperty 'io.netty.tryUnsafe', 'true' + systemProperty 'io.netty.tryReflectionSetAccessible', 'true' +} + +internalClusterTest { + systemProperty 'io.netty.allocator.numDirectArenas', '1' + systemProperty 'io.netty.noUnsafe', 'false' + systemProperty 'io.netty.tryUnsafe', 'true' + systemProperty 'io.netty.tryReflectionSetAccessible', 'true' +} + +spotless { + java { + // Files to exclude from formatting + targetExclude 'src/main/java/org/apache/arrow/flight/**/*.java' + } +} + + +tasks.named("dependencyLicenses").configure { + mapping from: /netty-.*/, to: 'netty' + mapping from: /grpc-.*/, to: 'grpc' + mapping from: /jackson-.*/, to: 'jackson' +} + +tasks.named('forbiddenApisMain').configure { + replaceSignatureFiles 'jdk-signatures' + + excludes = [ + 'org/apache/arrow/flight/OSFlightServer$Builder.class', + 'org/apache/arrow/flight/OSFlightClient$Builder.class', + 'org/opensearch/flight/bootstrap/server/ServerConfig$Netty4Configs.class', + 'org/opensearch/flight/bootstrap/server/ServerConfig.class', + 'org/opensearch/flight/bootstrap/tls/DefaultSslContextProvider.class', + 'org/apache/arrow/flight/OpenSearchFlightClient$Builder.class' + ] +} + + +tasks.named('thirdPartyAudit').configure { + ignoreMissingClasses( + 'com.google.gson.stream.JsonReader', + 'com.google.gson.stream.JsonToken', + 'org.apache.parquet.schema.GroupType', + 'com.google.rpc.Status', + 'com.google.rpc.Status$Builder', + // Parquet Schema classes + 'org.apache.parquet.schema.LogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$DateLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$DecimalLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$IntLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$IntervalLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$ListLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$LogicalTypeAnnotationVisitor', + 'org.apache.parquet.schema.LogicalTypeAnnotation$StringLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$TimeLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$TimeUnit', + 'org.apache.parquet.schema.LogicalTypeAnnotation$TimestampLogicalTypeAnnotation', + 'org.apache.parquet.schema.MessageType', + 'org.apache.parquet.schema.OriginalType', + 'org.apache.parquet.schema.PrimitiveType', + 'org.apache.parquet.schema.PrimitiveType$PrimitiveTypeName', + 'org.apache.parquet.schema.PrimitiveType$PrimitiveTypeNameConverter', + 'org.apache.parquet.schema.Type', + 'org.apache.parquet.schema.Type$Repetition', + 'org.apache.parquet.schema.Types', + 'org.apache.parquet.schema.Types$BaseListBuilder', + 'org.apache.parquet.schema.Types$GroupBuilder', + 'org.apache.parquet.schema.Types$ListBuilder', + 'org.apache.parquet.schema.Types$PrimitiveBuilder', + + 
'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', + // classes are missing + + // from io.netty.logging.CommonsLoggerFactory (netty) + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', + + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) + 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 'com.google.protobuf.nano.MessageNano', + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFChunk', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + 'lzma.sdk.lzma.Encoder', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + 'net.jpountz.xxhash.XXHash32', + 'net.jpountz.xxhash.XXHashFactory', + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.HandshakeListener', + + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration', + + 'com.google.protobuf.util.Timestamps' + ) + ignoreViolations( + // Guava internal classes + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 
'com.google.common.hash.Striped64', + 'com.google.common.hash.Striped64$1', + 'com.google.common.hash.Striped64$Cell', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5', + 'io.netty.util.internal.PlatformDependent0', + 'io.netty.util.internal.PlatformDependent0$1', + 'io.netty.util.internal.PlatformDependent0$2', + 'io.netty.util.internal.PlatformDependent0$3', + 'io.netty.util.internal.PlatformDependent0$4', + 'io.netty.util.internal.PlatformDependent0$6', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.PlatformDependent0', + 'io.netty.util.internal.PlatformDependent0$1', + 'io.netty.util.internal.PlatformDependent0$2', + 'io.netty.util.internal.PlatformDependent0$3', + 'io.netty.util.internal.PlatformDependent0$4', + 'io.netty.util.internal.PlatformDependent0$6', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 
'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'org.apache.arrow.memory.util.MemoryUtil', + 'org.apache.arrow.memory.util.MemoryUtil$1' + + ) +} diff --git a/plugins/arrow-flight-rpc/licenses/arrow-format-18.1.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/arrow-format-18.1.0.jar.sha1 new file mode 100644 index 0000000000000..6372bcd89eefd --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/arrow-format-18.1.0.jar.sha1 @@ -0,0 +1 @@ +9d356b6f20620f5619ff85b174f97ae507df4997 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-format-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/arrow-format-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-format-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-format-LICENSE.txt diff --git a/libs/arrow-spi/licenses/arrow-format-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/arrow-format-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-format-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-format-NOTICE.txt diff --git a/plugins/arrow-flight-rpc/licenses/arrow-memory-core-18.1.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/arrow-memory-core-18.1.0.jar.sha1 new file mode 100644 index 0000000000000..1a4da42973bfe --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/arrow-memory-core-18.1.0.jar.sha1 @@ -0,0 +1 @@ +35f4853d512f06759759b40b53bac850867886f8 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-memory-core-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/arrow-memory-core-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-memory-core-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-memory-core-LICENSE.txt diff --git a/libs/arrow-spi/licenses/arrow-memory-core-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/arrow-memory-core-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-memory-core-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-memory-core-NOTICE.txt diff --git a/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-18.1.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-18.1.0.jar.sha1 new file mode 100644 index 0000000000000..291d435138e30 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-18.1.0.jar.sha1 @@ -0,0 +1 @@ +9e9e08d0b548d2c02c632e5daaf176e588810d22 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-memory-netty-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-memory-netty-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-memory-netty-LICENSE.txt diff --git a/libs/arrow-spi/licenses/arrow-memory-netty-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-memory-netty-NOTICE.txt rename to 
plugins/arrow-flight-rpc/licenses/arrow-memory-netty-NOTICE.txt diff --git a/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-18.1.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-18.1.0.jar.sha1 new file mode 100644 index 0000000000000..40c7b2992d715 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-18.1.0.jar.sha1 @@ -0,0 +1 @@ +86c8fbdb6ab220603ea3a215f48a7f793ac6a08d \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-LICENSE.txt diff --git a/libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-NOTICE.txt diff --git a/plugins/arrow-flight-rpc/licenses/arrow-vector-18.1.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/arrow-vector-18.1.0.jar.sha1 new file mode 100644 index 0000000000000..d526f82b6f06e --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/arrow-vector-18.1.0.jar.sha1 @@ -0,0 +1 @@ +b1fb77f4ef36fd52afe480ba12b7da77367eb88c \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-vector-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/arrow-vector-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-vector-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-vector-LICENSE.txt diff --git a/libs/arrow-spi/licenses/arrow-vector-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/arrow-vector-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-vector-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-vector-NOTICE.txt diff --git a/libs/arrow-spi/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/arrow-flight-rpc/licenses/commons-codec-1.16.1.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/commons-codec-1.16.1.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/commons-codec-1.16.1.jar.sha1 diff --git a/libs/arrow-spi/licenses/commons-codec-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/commons-codec-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/commons-codec-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/commons-codec-LICENSE.txt diff --git a/libs/arrow-spi/licenses/commons-codec-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/commons-codec-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/commons-codec-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/commons-codec-NOTICE.txt diff --git a/plugins/arrow-flight-rpc/licenses/failureaccess-1.0.1.jar.sha1 b/plugins/arrow-flight-rpc/licenses/failureaccess-1.0.1.jar.sha1 new file mode 100644 index 0000000000000..4798b37e20691 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/failureaccess-1.0.1.jar.sha1 @@ -0,0 +1 @@ +1dcf1de382a0bf95a3d8b0849546c88bac1292c9 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/failureaccess-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/failureaccess-LICENSE.txt new file mode 100644 index 0000000000000..7a4a3ea2424c0 --- /dev/null +++ 
b/plugins/arrow-flight-rpc/licenses/failureaccess-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/libs/arrow-spi/licenses/flatbuffers-java-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/failureaccess-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/flatbuffers-java-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/failureaccess-NOTICE.txt diff --git a/libs/arrow-spi/licenses/flatbuffers-java-2.0.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/flatbuffers-java-2.0.0.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/flatbuffers-java-2.0.0.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/flatbuffers-java-2.0.0.jar.sha1 diff --git a/libs/arrow-spi/licenses/flatbuffers-java-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/flatbuffers-java-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/flatbuffers-java-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/flatbuffers-java-LICENSE.txt diff --git a/libs/arrow-spi/licenses/slf4j-api-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/flatbuffers-java-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/slf4j-api-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/flatbuffers-java-NOTICE.txt diff --git a/plugins/arrow-flight-rpc/licenses/flight-core-18.1.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/flight-core-18.1.0.jar.sha1 new file mode 100644 index 0000000000000..fc2e34539cf04 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/flight-core-18.1.0.jar.sha1 @@ -0,0 +1 @@ +82494895fcb0656967680442f63ce1214e532d52 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/flight-core-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/flight-core-LICENSE.txt new file mode 100644 index 0000000000000..7bb1330a1002b --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/flight-core-LICENSE.txt @@ -0,0 +1,2261 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- + +src/arrow/util (some portions): Apache 2.0, and 3-clause BSD + +Some portions of this module are derived from code in the Chromium project, +copyright (c) Google inc and (c) The Chromium Authors and licensed under the +Apache 2.0 License or the under the 3-clause BSD license: + + Copyright (c) 2013 The Chromium Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. 
nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from Daniel Lemire's FrameOfReference project. + +https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp +https://github.com/lemire/FrameOfReference/blob/146948b6058a976bc7767262ad3a2ce201486b93/scripts/turbopacking64.py + +Copyright: 2013 Daniel Lemire +Home page: http://lemire.me/en/ +Project page: https://github.com/lemire/FrameOfReference +License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from the TensorFlow project + +Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the NumPy project. + +https://github.com/numpy/numpy/blob/e1f191c46f2eebd6cb892a4bfe14d9dd43a06c4e/numpy/core/src/multiarray/multiarraymodule.c#L2910 + +https://github.com/numpy/numpy/blob/68fd82271b9ea5a9e50d4e761061dfcca851382a/numpy/core/src/multiarray/datetime.c + +Copyright (c) 2005-2017, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from the Boost project + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +This project includes code from the FlatBuffers project + +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the tslib project + +Copyright 2015 Microsoft Corporation. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+--------------------------------------------------------------------------------
+
+This project includes code from the jemalloc project
+
+https://github.com/jemalloc/jemalloc
+
+Copyright (C) 2002-2017 Jason Evans <jasone@canonware.com>.
+All rights reserved.
+Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
+Copyright (C) 2009-2017 Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice(s),
+   this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice(s),
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+--------------------------------------------------------------------------------
+
+This project includes code from the Go project, BSD 3-clause license + PATENTS
+weak patent termination clause
+(https://github.com/golang/go/blob/master/PATENTS).
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from the hs2client + +https://github.com/cloudera/hs2client + +Copyright 2016 Cloudera Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +The script ci/scripts/util_wait_for_it.sh has the following license + +Copyright (c) 2016 Giles Hall + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The script r/configure has the following license (MIT) + +Copyright (c) 2017, Jeroen Ooms and Jim Hester + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+cpp/src/arrow/util/logging.cc, cpp/src/arrow/util/logging.h and
+cpp/src/arrow/util/logging-test.cc are adapted from
+Ray Project (https://github.com/ray-project/ray) (Apache 2.0).
+
+Copyright (c) 2016 Ray Project (https://github.com/ray-project/ray)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+--------------------------------------------------------------------------------
+The files cpp/src/arrow/vendored/datetime/date.h, cpp/src/arrow/vendored/datetime/tz.h,
+cpp/src/arrow/vendored/datetime/tz_private.h, cpp/src/arrow/vendored/datetime/ios.h,
+cpp/src/arrow/vendored/datetime/ios.mm,
+cpp/src/arrow/vendored/datetime/tz.cpp are adapted from
+Howard Hinnant's date library (https://github.com/HowardHinnant/date).
+It is licensed under the MIT license.
+
+The MIT License (MIT)
+Copyright (c) 2015, 2016, 2017 Howard Hinnant
+Copyright (c) 2016 Adrian Colomitchi
+Copyright (c) 2017 Florian Dang
+Copyright (c) 2017 Paul Thompson
+Copyright (c) 2018 Tomasz Kamiński
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+ +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/util/utf8.h includes code adapted from the page + https://bjoern.hoehrmann.de/utf-8/decoder/dfa/ +with the following license (MIT) + +Copyright (c) 2008-2009 Bjoern Hoehrmann + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/xxhash/ have the following license +(BSD 2-Clause License) + +xxHash Library +Copyright (c) 2012-2014, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +You can contact the author at : +- xxHash homepage: http://www.xxhash.com +- xxHash source repository : https://github.com/Cyan4973/xxHash + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/double-conversion/ have the following license +(BSD 3-Clause License) + +Copyright 2006-2011, the V8 project authors. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of Google Inc. nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+The files in cpp/src/arrow/vendored/uriparser/ have the following license
+(BSD 3-Clause License)
+
+uriparser - RFC 3986 URI parsing library
+
+Copyright (C) 2007, Weijia Song
+Copyright (C) 2007, Sebastian Pipping
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+    * Redistributions of source code must retain the above
+      copyright notice, this list of conditions and the following
+      disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials
+      provided with the distribution.
+
+    * Neither the name of the <ORGANIZATION> nor the names of its
+      contributors may be used to endorse or promote products
+      derived from this software without specific prior written
+      permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+The files under dev/tasks/conda-recipes have the following license
+
+BSD 3-clause license
+Copyright (c) 2015-2018, conda-forge
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/utfcpp/ have the following license + +Copyright 2006-2018 Nemanja Trifunovic + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +This project includes code from Apache Kudu. + + * cpp/cmake_modules/CompilerInfo.cmake is based on Kudu's cmake_modules/CompilerInfo.cmake + +Copyright: 2016 The Apache Software Foundation. +Home page: https://kudu.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Apache Impala (incubating), formerly +Impala. The Impala code and rights were donated to the ASF as part of the +Incubator process after the initial code imports into Apache Parquet. + +Copyright: 2012 Cloudera, Inc. +Copyright: 2016 The Apache Software Foundation. 
+Home page: http://impala.apache.org/
+License: http://www.apache.org/licenses/LICENSE-2.0
+
+--------------------------------------------------------------------------------
+
+This project includes code from Apache Aurora.
+
+* dev/release/{release,changelog,release-candidate} are based on the scripts from
+  Apache Aurora
+
+Copyright: 2016 The Apache Software Foundation.
+Home page: https://aurora.apache.org/
+License: http://www.apache.org/licenses/LICENSE-2.0
+
+--------------------------------------------------------------------------------
+
+This project includes code from the Google styleguide.
+
+* cpp/build-support/cpplint.py is based on the scripts from the Google styleguide.
+
+Copyright: 2009 Google Inc. All rights reserved.
+Homepage: https://github.com/google/styleguide
+License: 3-clause BSD
+
+--------------------------------------------------------------------------------
+
+This project includes code from Snappy.
+
+* cpp/cmake_modules/{SnappyCMakeLists.txt,SnappyConfig.h} are based on code
+  from Google's Snappy project.
+
+Copyright: 2009 Google Inc. All rights reserved.
+Homepage: https://github.com/google/snappy
+License: 3-clause BSD
+
+--------------------------------------------------------------------------------
+
+This project includes code from the manylinux project.
+
+* python/manylinux1/scripts/{build_python.sh,python-tag-abi-tag.py,
+  requirements.txt} are based on code from the manylinux project.
+
+Copyright: 2016 manylinux
+Homepage: https://github.com/pypa/manylinux
+License: The MIT License (MIT)
+
+--------------------------------------------------------------------------------
+
+This project includes code from the cymove project:
+
+* python/pyarrow/includes/common.pxd includes code from the cymove project
+
+The MIT License (MIT)
+Copyright (c) 2019 Omer Ozarslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+OR OTHER DEALINGS IN THE SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+This project includes code from the Ursabot project under the dev/archery
+directory.
+
+License: BSD 2-Clause
+
+Copyright 2019 RStudio, Inc.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. 
Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+This project includes code from mingw-w64.
+
+* cpp/src/arrow/util/cpu-info.cc has a polyfill for mingw-w64 < 5
+
+Copyright (c) 2009 - 2013 by the mingw-w64 project
+Homepage: https://mingw-w64.org
+License: Zope Public License (ZPL) Version 2.1.
+
+--------------------------------------------------------------------------------
+
+This project includes code from Google's Asylo project.
+
+* cpp/src/arrow/result.h is based on status_or.h
+
+Copyright (c) 2017 Asylo authors
+Homepage: https://asylo.dev/
+License: Apache 2.0
+
+--------------------------------------------------------------------------------
+
+This project includes code from Google's protobuf project
+
+* cpp/src/arrow/result.h ARROW_ASSIGN_OR_RAISE is based on ASSIGN_OR_RETURN
+* cpp/src/arrow/util/bit_stream_utils.h contains code from wire_format_lite.h
+
+Copyright 2008 Google Inc. All rights reserved.
+Homepage: https://developers.google.com/protocol-buffers/
+License:
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. + +-------------------------------------------------------------------------------- + +3rdparty dependency LLVM is statically linked in certain binary distributions. +Additionally some sections of source code have been derived from sources in LLVM +and have been clearly labeled as such. LLVM has the following license: + +============================================================================== +The LLVM Project is under the Apache License v2.0 with LLVM Exceptions: +============================================================================== + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +---- LLVM Exceptions to the Apache 2.0 License ---- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into an Object form of such source code, you +may redistribute such embedded portions in such Object form without complying +with the conditions of Sections 4(a), 4(b) and 4(d) of the License. + +In addition, if you combine or link compiled forms of this Software with +software that is licensed under the GPLv2 ("Combined Software") and if a +court of competent jurisdiction determines that the patent provision (Section +3), the indemnity provision (Section 9) or other Section of the License +conflicts with the conditions of the GPLv2, you may retroactively and +prospectively choose to deem waived or otherwise exclude such Section(s) of +the License, but only in their entirety and only with respect to the Combined +Software. + +============================================================================== +Software from third parties included in the LLVM Project: +============================================================================== +The LLVM Project contains third party software which is under different license +terms. All such code will be identified clearly using at least one of two +mechanisms: +1) It will be in a separate directory tree with its own `LICENSE.txt` or + `LICENSE` file at the top containing the specific license and restrictions + which apply to that software, or +2) It will contain specific license and restriction terms at the top of every + file. + +-------------------------------------------------------------------------------- + +3rdparty dependency gRPC is statically linked in certain binary +distributions, like the python wheels. gRPC has the following license: + +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +-------------------------------------------------------------------------------- + +3rdparty dependency Apache Thrift is statically linked in certain binary +distributions, like the python wheels. Apache Thrift has the following license: + +Apache Thrift +Copyright (C) 2006 - 2019, The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency Apache ORC is statically linked in certain binary +distributions, like the python wheels. Apache ORC has the following license: + +Apache ORC +Copyright 2013-2019 The Apache Software Foundation + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by Hewlett-Packard: +(c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency zstd is statically linked in certain binary +distributions, like the python wheels. ZSTD has the following license: + +BSD License + +For Zstandard software + +Copyright (c) 2016-present, Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency lz4 is statically linked in certain binary +distributions, like the python wheels. lz4 has the following license: + +LZ4 Library +Copyright (c) 2011-2016, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency Brotli is statically linked in certain binary +distributions, like the python wheels. Brotli has the following license: + +Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- + +3rdparty dependency rapidjson is statically linked in certain binary +distributions, like the python wheels. rapidjson and its dependencies have the +following licenses: + +Tencent is pleased to support the open source community by making RapidJSON +available. + +Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. +All rights reserved. + +If you have downloaded a copy of the RapidJSON binary from Tencent, please note +that the RapidJSON binary is licensed under the MIT License. +If you have downloaded a copy of the RapidJSON source code from Tencent, please +note that RapidJSON source code is licensed under the MIT License, except for +the third-party components listed below which are subject to different license +terms. Your integration of RapidJSON into your own projects may require +compliance with the MIT License, as well as the other licenses applicable to +the third-party components included within RapidJSON. To avoid the problematic +JSON license in your own projects, it's sufficient to exclude the +bin/jsonchecker/ directory, as it's the only code under the JSON license. +A copy of the MIT License is included in this file. + +Other dependencies and licenses: + + Open Source Software Licensed Under the BSD License: + -------------------------------------------------------------------- + + The msinttypes r29 + Copyright (c) 2006-2013 Alexander Chemeris + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + DAMAGE. 
+ + Terms of the MIT License: + -------------------------------------------------------------------- + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +3rdparty dependency snappy is statically linked in certain binary +distributions, like the python wheels. snappy has the following license: + +Copyright 2011, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Google Inc. nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=== + +Some of the benchmark data in testdata/ is licensed differently: + + - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and + is licensed under the Creative Commons Attribution 3.0 license + (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/ + for more information. + + - kppkn.gtb is taken from the Gaviota chess tablebase set, and + is licensed under the MIT License. See + https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1 + for more information. 
+
+  - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper
+    “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA
+    Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro,
+    which is licensed under the CC-BY license. See
+    http://www.ploscompbiol.org/static/license for more information.
+
+  - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project
+    Gutenberg. The first three have expired copyrights and are in the public
+    domain; the latter does not have expired copyright, but is still in the
+    public domain according to the license information
+    (http://www.gutenberg.org/ebooks/53).
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency gflags is statically linked in certain binary
+distributions, like the python wheels. gflags has the following license:
+
+Copyright (c) 2006, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency glog is statically linked in certain binary
+distributions, like the python wheels. glog has the following license:
+
+Copyright (c) 2008, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +A function gettimeofday in utilities.cc is based on + +http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd + +The license of this code is: + +Copyright (c) 2003-2008, Jouni Malinen and contributors +All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name(s) of the above-listed copyright holder(s) nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency re2 is statically linked in certain binary +distributions, like the python wheels. re2 has the following license: + +Copyright (c) 2009 The RE2 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency c-ares is statically linked in certain binary +distributions, like the python wheels. c-ares has the following license: + +# c-ares license + +Copyright (c) 2007 - 2018, Daniel Stenberg with many contributors, see AUTHORS +file. + +Copyright 1998 by the Massachusetts Institute of Technology. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, provided that +the above copyright notice appear in all copies and that both that copyright +notice and this permission notice appear in supporting documentation, and that +the name of M.I.T. not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior permission. +M.I.T. makes no representations about the suitability of this software for any +purpose. It is provided "as is" without express or implied warranty. + +-------------------------------------------------------------------------------- + +3rdparty dependency zlib is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. In the future +this will likely change to static linkage. zlib has the following license: + +zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.11, January 15th, 2017 + + Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +-------------------------------------------------------------------------------- + +3rdparty dependency openssl is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. openssl +preceding version 3 has the following license: + + LICENSE ISSUES + ============== + + The OpenSSL toolkit stays under a double license, i.e. 
both the conditions of + the OpenSSL License and the original SSLeay license apply to the toolkit. + See below for the actual license texts. + + OpenSSL License + --------------- + +/* ==================================================================== + * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ + + Original SSLeay License + ----------------------- + +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). 
+ * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +-------------------------------------------------------------------------------- + +This project includes code from the rtools-backports project. + +* ci/scripts/PKGBUILD and ci/scripts/r_windows_build.sh are based on code + from the rtools-backports project. + +Copyright: Copyright (c) 2013 - 2019, Алексей and Jeroen Ooms. +All rights reserved. +Homepage: https://github.com/r-windows/rtools-backports +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +Some code from pandas has been adapted for the pyarrow codebase. pandas is +available under the 3-clause BSD license, which follows: + +pandas license +============== + +Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team +All rights reserved. + +Copyright (c) 2008-2011 AQR Capital Management, LLC +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the copyright holder nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +Some bits from DyND, in particular aspects of the build system, have been +adapted from libdynd and dynd-python under the terms of the BSD 2-clause +license + +The BSD 2-Clause License + + Copyright (C) 2011-12, Dynamic NDArray Developers + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Dynamic NDArray Developers list: + + * Mark Wiebe + * Continuum Analytics + +-------------------------------------------------------------------------------- + +Some source code from Ibis (https://github.com/cloudera/ibis) has been adapted +for PyArrow. Ibis is released under the Apache License, Version 2.0. 
+ +-------------------------------------------------------------------------------- + +dev/tasks/homebrew-formulae/apache-arrow.rb has the following license: + +BSD 2-Clause License + +Copyright (c) 2009-present, Homebrew contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- + +cpp/src/arrow/vendored/base64.cpp has the following license + +ZLIB License + +Copyright (C) 2004-2017 René Nyffenegger + +This source code is provided 'as-is', without any express or implied +warranty. In no event will the author be held liable for any damages arising +from the use of this software. + +Permission is granted to anyone to use this software for any purpose, including +commercial applications, and to alter it and redistribute it freely, subject to +the following restrictions: + +1. The origin of this source code must not be misrepresented; you must not + claim that you wrote the original source code. If you use this source code + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original source code. + +3. This notice may not be removed or altered from any source distribution. + +René Nyffenegger rene.nyffenegger@adp-gmbh.ch + +-------------------------------------------------------------------------------- + +This project includes code from Folly. + + * cpp/src/arrow/vendored/ProducerConsumerQueue.h + +is based on Folly's + + * folly/Portability.h + * folly/lang/Align.h + * folly/ProducerConsumerQueue.h + +Copyright: Copyright (c) Facebook, Inc. and its affiliates. +Home page: https://github.com/facebook/folly +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/vendored/musl/strptime.c has the following license + +Copyright © 2005-2020 Rich Felker, et al. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +The file cpp/cmake_modules/BuildUtils.cmake contains code from + +https://gist.github.com/cristianadam/ef920342939a89fae3e8a85ca9459b49 + +which is made available under the MIT license + +Copyright (c) 2019 Cristian Adam + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/portable-snippets/ contain code from + +https://github.com/nemequ/portable-snippets + +and have the following copyright notice: + +Each source file contains a preamble explaining the license situation +for that file, which takes priority over this file. With the +exception of some code pulled in from other repositories (such as +µnit, an MIT-licensed project which is used for testing), the code is +public domain, released using the CC0 1.0 Universal dedication (*). + +(*) https://creativecommons.org/publicdomain/zero/1.0/legalcode + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/fast_float/ contain code from + +https://github.com/lemire/fast_float + +which is made available under the Apache License 2.0. + +-------------------------------------------------------------------------------- + +The file python/pyarrow/vendored/docscrape.py contains code from + +https://github.com/numpy/numpydoc/ + +which is made available under the BSD 2-clause license. 
+ +-------------------------------------------------------------------------------- + +The file python/pyarrow/vendored/version.py contains code from + +https://github.com/pypa/packaging/ + +which is made available under both the Apache license v2.0 and the +BSD 2-clause license. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/pcg contain code from + +https://github.com/imneme/pcg-cpp + +and have the following copyright notice: + +Copyright 2014-2019 Melissa O'Neill , + and the PCG Project contributors. + +SPDX-License-Identifier: (Apache-2.0 OR MIT) + +Licensed under the Apache License, Version 2.0 (provided in +LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0) +or under the MIT license (provided in LICENSE-MIT.txt and at +http://opensource.org/licenses/MIT), at your option. This file may not +be copied, modified, or distributed except according to those terms. + +Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either +express or implied. See your chosen license for details. + +-------------------------------------------------------------------------------- +r/R/dplyr-count-tally.R (some portions) + +Some portions of this file are derived from code from + +https://github.com/tidyverse/dplyr/ + +which is made available under the MIT license + +Copyright (c) 2013-2019 RStudio and others. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The file src/arrow/util/io_util.cc contains code from the CPython project +which is made available under the Python Software Foundation License Version 2. + +-------------------------------------------------------------------------------- + +3rdparty dependency opentelemetry-cpp is statically linked in certain binary +distributions. opentelemetry-cpp is made available under the Apache License 2.0. + +Copyright The OpenTelemetry Authors +SPDX-License-Identifier: Apache-2.0 + +-------------------------------------------------------------------------------- + +ci/conan/ is based on code from Conan Package and Dependency Manager. 
+ +Copyright (c) 2019 Conan.io + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +3rdparty dependency UCX is redistributed as a dynamically linked shared +library in certain binary distributions. UCX has the following license: + +Copyright (c) 2014-2015 UT-Battelle, LLC. All rights reserved. +Copyright (C) 2014-2020 Mellanox Technologies Ltd. All rights reserved. +Copyright (C) 2014-2015 The University of Houston System. All rights reserved. +Copyright (C) 2015 The University of Tennessee and The University + of Tennessee Research Foundation. All rights reserved. +Copyright (C) 2016-2020 ARM Ltd. All rights reserved. +Copyright (c) 2016 Los Alamos National Security, LLC. All rights reserved. +Copyright (C) 2016-2020 Advanced Micro Devices, Inc. All rights reserved. +Copyright (C) 2019 UChicago Argonne, LLC. All rights reserved. +Copyright (c) 2018-2020 NVIDIA CORPORATION. All rights reserved. +Copyright (C) 2020 Huawei Technologies Co., Ltd. All rights reserved. +Copyright (C) 2016-2020 Stony Brook University. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The file dev/tasks/r/github.packages.yml contains code from + +https://github.com/ursa-labs/arrow-r-nightly + +which is made available under the Apache License 2.0. + +-------------------------------------------------------------------------------- +.github/actions/sync-nightlies/action.yml (some portions) + +Some portions of this file are derived from code from + +https://github.com/JoshPiper/rsync-docker + +which is made available under the MIT license + +Copyright (c) 2020 Joshua Piper + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +.github/actions/sync-nightlies/action.yml (some portions) + +Some portions of this file are derived from code from + +https://github.com/burnett01/rsync-deployments + +which is made available under the MIT license + +Copyright (c) 2019-2022 Contention +Copyright (c) 2019-2022 Burnett01 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--------------------------------------------------------------------------------
+java/vector/src/main/java/org/apache/arrow/vector/util/IntObjectHashMap.java
+java/vector/src/main/java/org/apache/arrow/vector/util/IntObjectMap.java
+
+These files are derived from code from Netty, which is made available under the
+Apache License 2.0.
diff --git a/plugins/arrow-flight-rpc/licenses/flight-core-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/flight-core-NOTICE.txt
new file mode 100644
index 0000000000000..2089c6fb20358
--- /dev/null
+++ b/plugins/arrow-flight-rpc/licenses/flight-core-NOTICE.txt
@@ -0,0 +1,84 @@
+Apache Arrow
+Copyright 2016-2024 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+This product includes software from the SFrame project (BSD, 3-clause).
+* Copyright (C) 2015 Dato, Inc.
+* Copyright (c) 2009 Carnegie Mellon University.
+
+This product includes software from the Feather project (Apache 2.0)
+https://github.com/wesm/feather
+
+This product includes software from the DyND project (BSD 2-clause)
+https://github.com/libdynd
+
+This product includes software from the LLVM project
+ * distributed under the University of Illinois Open Source
+
+This product includes software from the google-lint project
+ * Copyright (c) 2009 Google Inc. All rights reserved.
+
+This product includes software from the mman-win32 project
+ * Copyright https://code.google.com/p/mman-win32/
+ * Licensed under the MIT License;
+
+This product includes software from the LevelDB project
+ * Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * Moved from Kudu http://github.com/cloudera/kudu
+
+This product includes software from the CMake project
+ * Copyright 2001-2009 Kitware, Inc.
+ * Copyright 2012-2014 Continuum Analytics, Inc.
+ * All rights reserved.
+
+This product includes software from https://github.com/matthew-brett/multibuild (BSD 2-clause)
+ * Copyright (c) 2013-2016, Matt Terry and Matthew Brett; all rights reserved.
+
+This product includes software from the Ibis project (Apache 2.0)
+ * Copyright (c) 2015 Cloudera, Inc.
+ * https://github.com/cloudera/ibis
+
+This product includes software from Dremio (Apache 2.0)
+ * Copyright (C) 2017-2018 Dremio Corporation
+ * https://github.com/dremio/dremio-oss
+
+This product includes software from Google Guava (Apache 2.0)
+ * Copyright (C) 2007 The Guava Authors
+ * https://github.com/google/guava
+
+This product includes software from CMake (BSD 3-Clause)
+ * CMake - Cross Platform Makefile Generator
+ * Copyright 2000-2019 Kitware, Inc. and Contributors
+
+The web site includes files generated by Jekyll.
+
+--------------------------------------------------------------------------------
+
+This product includes code from Apache Kudu, which includes the following in
+its NOTICE file:
+
+  Apache Kudu
+  Copyright 2016 The Apache Software Foundation
+
+  This product includes software developed at
+  The Apache Software Foundation (http://www.apache.org/).
+
+  Portions of this software were developed at
+  Cloudera, Inc (http://www.cloudera.com/).
+ +-------------------------------------------------------------------------------- + +This product includes code from Apache ORC, which includes the following in +its NOTICE file: + + Apache ORC + Copyright 2013-2019 The Apache Software Foundation + + This product includes software developed by The Apache Software + Foundation (http://www.apache.org/). + + This product includes software developed by Hewlett-Packard: + (c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P diff --git a/libs/arrow-spi/licenses/netty-common-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/grpc-LICENSE.txt similarity index 99% rename from libs/arrow-spi/licenses/netty-common-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/grpc-LICENSE.txt index 62589edd12a37..d645695673349 100644 --- a/libs/arrow-spi/licenses/netty-common-LICENSE.txt +++ b/plugins/arrow-flight-rpc/licenses/grpc-LICENSE.txt @@ -1,7 +1,7 @@ Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -193,7 +193,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/plugins/arrow-flight-rpc/licenses/grpc-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/grpc-NOTICE.txt new file mode 100644 index 0000000000000..f70c5620cf75a --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/grpc-NOTICE.txt @@ -0,0 +1,62 @@ +Copyright 2014 The gRPC Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +----------------------------------------------------------------------- + +This product contains a modified portion of 'OkHttp', an open source +HTTP & SPDY client for Android and Java applications, which can be obtained +at: + + * LICENSE: + * okhttp/third_party/okhttp/LICENSE (Apache License 2.0) + * HOMEPAGE: + * https://github.com/square/okhttp + * LOCATION_IN_GRPC: + * okhttp/third_party/okhttp + +This product contains a modified portion of 'Envoy', an open source +cloud-native high-performance edge/middle/service proxy, which can be +obtained at: + + * LICENSE: + * xds/third_party/envoy/LICENSE (Apache License 2.0) + * NOTICE: + * xds/third_party/envoy/NOTICE + * HOMEPAGE: + * https://www.envoyproxy.io + * LOCATION_IN_GRPC: + * xds/third_party/envoy + +This product contains a modified portion of 'protoc-gen-validate (PGV)', +an open source protoc plugin to generate polyglot message validators, +which can be obtained at: + + * LICENSE: + * xds/third_party/protoc-gen-validate/LICENSE (Apache License 2.0) + * NOTICE: + * xds/third_party/protoc-gen-validate/NOTICE + * HOMEPAGE: + * https://github.com/envoyproxy/protoc-gen-validate + * LOCATION_IN_GRPC: + * xds/third_party/protoc-gen-validate + +This product contains a modified portion of 'udpa', +an open source universal data plane API, which can be obtained at: + + * LICENSE: + * xds/third_party/udpa/LICENSE (Apache License 2.0) + * HOMEPAGE: + * https://github.com/cncf/udpa + * LOCATION_IN_GRPC: + * xds/third_party/udpa diff --git a/plugins/arrow-flight-rpc/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/grpc-api-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..1844172dec982 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/grpc-api-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a257a5dd25dda1c97a99b56d5b9c1e56c12ae554 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/grpc-core-1.68.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/grpc-core-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..e20345d29e914 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/grpc-core-1.68.2.jar.sha1 @@ -0,0 +1 @@ +b0fd51a1c029785d1c9ae2cfc80a296b60dfcfdb \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/grpc-netty-1.68.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/grpc-netty-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..36be00ed13330 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/grpc-netty-1.68.2.jar.sha1 @@ -0,0 +1 @@ +3c3279d2e3520195fd26e0c3d9aca2ed1157d8c3 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/grpc-protobuf-1.68.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/grpc-protobuf-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..e861b41837f33 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/grpc-protobuf-1.68.2.jar.sha1 @@ -0,0 +1 @@ +35b28e0d57874021cd31e76dd4a795f76a82471e \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..b2401f9752829 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a53064b896adcfefe74362a33e111492351dfc03 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/grpc-stub-1.68.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/grpc-stub-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..118464f8f48ff --- /dev/null +++ 
b/plugins/arrow-flight-rpc/licenses/grpc-stub-1.68.2.jar.sha1 @@ -0,0 +1 @@ +d58ee1cf723b4b5536d44b67e328c163580a8d98 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/guava-33.3.1-jre.jar.sha1 b/plugins/arrow-flight-rpc/licenses/guava-33.3.1-jre.jar.sha1 new file mode 100644 index 0000000000000..ce59350c0d430 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/guava-33.3.1-jre.jar.sha1 @@ -0,0 +1 @@ +852f8b363da0111e819460021ca693cacca3e8db \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/guava-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/arrow-flight-rpc/licenses/guava-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/libs/arrow-spi/licenses/jackson-annotations-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/jackson-LICENSE similarity index 100% rename from libs/arrow-spi/licenses/jackson-annotations-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/jackson-LICENSE diff --git a/libs/arrow-spi/licenses/jackson-annotations-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/jackson-NOTICE similarity index 100% rename from libs/arrow-spi/licenses/jackson-annotations-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/jackson-NOTICE diff --git a/libs/arrow-spi/licenses/jackson-annotations-2.18.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/jackson-annotations-2.18.2.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/jackson-annotations-2.18.2.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/jackson-annotations-2.18.2.jar.sha1 diff --git a/libs/arrow-spi/licenses/jackson-databind-2.18.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/jackson-databind-2.18.2.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/jackson-databind-2.18.2.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/jackson-databind-2.18.2.jar.sha1 diff --git a/plugins/arrow-flight-rpc/licenses/jsr305-3.0.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/jsr305-3.0.2.jar.sha1 new file mode 100644 index 0000000000000..c5c92d87b9d6c --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/jsr305-3.0.2.jar.sha1 @@ -0,0 +1 @@ +25ea2e8b0c338a877313bd4672d3fe056ea78f0d \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/jsr305-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/jsr305-LICENSE.txt new file mode 100644 index 0000000000000..0cb8710c4b3e5 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/jsr305-LICENSE.txt @@ -0,0 +1,29 @@ +Copyright (c) 2007-2009, JSR305 expert group +All rights reserved. + +http://www.opensource.org/licenses/bsd-license.php + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the JSR305 expert group nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + diff --git a/plugins/arrow-flight-rpc/licenses/jsr305-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/jsr305-NOTICE.txt new file mode 100644 index 0000000000000..8d1c8b69c3fce --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/jsr305-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/libs/arrow-spi/licenses/netty-buffer-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/netty-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/netty-buffer-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/netty-LICENSE.txt diff --git a/libs/arrow-spi/licenses/netty-buffer-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/netty-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/netty-buffer-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/netty-NOTICE.txt diff --git a/libs/arrow-spi/licenses/netty-buffer-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-buffer-4.1.118.Final.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/netty-buffer-4.1.118.Final.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/netty-buffer-4.1.118.Final.jar.sha1 diff --git a/plugins/arrow-flight-rpc/licenses/netty-codec-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-codec-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..7964f25f0372a --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-codec-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +307f665c08ce57333121de4f460479fc0c3c94d4 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-codec-http-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-codec-http-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..7cb43dd276c8a --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-codec-http-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +eda08a71294afe78c779b85fd696bc13491507a8 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-codec-http2-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-codec-http2-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..fab58dee2dfbf --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-codec-http2-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +e3c35c0685ec9e84c4f84b79feea7c9d185a08d3 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/netty-common-4.1.118.Final.jar.sha1 
b/plugins/arrow-flight-rpc/licenses/netty-common-4.1.118.Final.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/netty-common-4.1.118.Final.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/netty-common-4.1.118.Final.jar.sha1 diff --git a/plugins/arrow-flight-rpc/licenses/netty-handler-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-handler-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..d6eea2494813e --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-handler-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +30ebb05b6b0fb071dbfcf713017c4a767a97bb9b \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-resolver-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-resolver-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..19fbdbbb19b04 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-resolver-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +28c378c19c1779eca1104b400452627f3ebc4aea \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-tcnative-classes-2.0.66.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-tcnative-classes-2.0.66.Final.jar.sha1 new file mode 100644 index 0000000000000..7bc4213520498 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-tcnative-classes-2.0.66.Final.jar.sha1 @@ -0,0 +1 @@ +9588bd2f891157538a78d86c945aa34bf9308dda \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-transport-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-transport-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..f3b714539e61b --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-transport-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +5a27232e5d08218722d94ca14f0b1b4576e7711c \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-transport-classes-epoll-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-transport-classes-epoll-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..d53656cd3b7dc --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-transport-classes-epoll-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +376ce95507066f0e755d97c1c8bcd6c33f657617 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-transport-native-unix-common-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-transport-native-unix-common-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..f1562364e2848 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-transport-native-unix-common-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +9da25a94e6a0edac90da0bc7894e5a54efcb866b \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/parquet-arrow-1.13.1.jar.sha1 b/plugins/arrow-flight-rpc/licenses/parquet-arrow-1.13.1.jar.sha1 new file mode 100644 index 0000000000000..a1b89891ca8e1 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/parquet-arrow-1.13.1.jar.sha1 @@ -0,0 +1 @@ +9e59add52791af8b05c1aefe2a2f8865602c9368 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/parquet-arrow-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/parquet-arrow-LICENSE.txt new file mode 100644 index 0000000000000..b0065815a5e92 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/parquet-arrow-LICENSE.txt @@ -0,0 +1,218 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +-------------------------------------------------------------------------------- + +This product includes code from Apache Avro. + +Copyright: 2014 The Apache Software Foundation. +Home page: https://avro.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Daniel Lemire's JavaFastPFOR project. The +"Lemire" bit packing source code produced by parquet-generator is derived from +the JavaFastPFOR project. + +Copyright: 2013 Daniel Lemire +Home page: http://lemire.me/en/ +Project page: https://github.com/lemire/JavaFastPFOR +License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This product includes code from Apache Spark. + +* dev/merge_parquet_pr.py is based on Spark's dev/merge_spark_pr.py + +Copyright: 2014 The Apache Software Foundation. +Home page: https://spark.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This product includes code from Twitter's ElephantBird project. 
+ +* parquet-hadoop's UnmaterializableRecordCounter.java includes code from + ElephantBird's LzoRecordReader.java + +Copyright: 2012-2014 Twitter +Home page: https://github.com/twitter/elephant-bird +License: http://www.apache.org/licenses/LICENSE-2.0 + diff --git a/plugins/arrow-flight-rpc/licenses/parquet-arrow-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/parquet-arrow-NOTICE.txt new file mode 100644 index 0000000000000..46300d6cd98fd --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/parquet-arrow-NOTICE.txt @@ -0,0 +1,94 @@ + +Apache Parquet Java +Copyright 2014-2024 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +-------------------------------------------------------------------------------- + +This product includes parquet-tools, initially developed at ARRIS, Inc. with +the following copyright notice: + + Copyright 2013 ARRIS, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- + +This product includes parquet-protobuf, initially developed by Lukas Nalezenc +with the following copyright notice: + + Copyright 2013 Lukas Nalezenec. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- + +This product includes code from Apache Avro, which includes the following in +its NOTICE file: + + Apache Avro + Copyright 2010-2015 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + +-------------------------------------------------------------------------------- + +This project includes code from Kite, developed at Cloudera, Inc. with +the following copyright notice: + +| Copyright 2013 Cloudera Inc. +| +| Licensed under the Apache License, Version 2.0 (the "License"); +| you may not use this file except in compliance with the License. +| You may obtain a copy of the License at +| +| http://www.apache.org/licenses/LICENSE-2.0 +| +| Unless required by applicable law or agreed to in writing, software +| distributed under the License is distributed on an "AS IS" BASIS, +| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +| See the License for the specific language governing permissions and +| limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from Netflix, Inc. 
with the following copyright +notice: + +| Copyright 2016 Netflix, Inc. +| +| Licensed under the Apache License, Version 2.0 (the "License"); +| you may not use this file except in compliance with the License. +| You may obtain a copy of the License at +| +| http://www.apache.org/licenses/LICENSE-2.0 +| +| Unless required by applicable law or agreed to in writing, software +| distributed under the License is distributed on an "AS IS" BASIS, +| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +| See the License for the specific language governing permissions and +| limitations under the License. + diff --git a/plugins/arrow-flight-rpc/licenses/perfmark-api-0.27.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/perfmark-api-0.27.0.jar.sha1 new file mode 100644 index 0000000000000..c85ee41fd9bbd --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/perfmark-api-0.27.0.jar.sha1 @@ -0,0 +1 @@ +f86f575a41b091786a4b027cd9c0c1d2e3fc1c01 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/perfmark-api-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/perfmark-api-LICENSE.txt new file mode 100644 index 0000000000000..261eeb9e9f8b2 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/perfmark-api-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/arrow-flight-rpc/licenses/perfmark-api-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/perfmark-api-NOTICE.txt new file mode 100644 index 0000000000000..04fbb4e692e51 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/perfmark-api-NOTICE.txt @@ -0,0 +1,41 @@ + +Copyright 2019 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +----------------------------------------------------------------------- + +This product contains a modified portion of 'Catapult', an open source +Trace Event viewer for Chome, Linux, and Android applications, which can +be obtained at: + + * LICENSE: + * traceviewer/src/main/resources/io/perfmark/traceviewer/third_party/catapult/LICENSE (New BSD License) + * HOMEPAGE: + * https://github.com/catapult-project/catapult + +This product contains a modified portion of 'Polymer', a library for Web +Components, which can be obtained at: + * LICENSE: + * traceviewer/src/main/resources/io/perfmark/traceviewer/third_party/polymer/LICENSE (New BSD License) + * HOMEPAGE: + * https://github.com/Polymer/polymer + + +This product contains a modified portion of 'ASM', an open source +Java Bytecode library, which can be obtained at: + + * LICENSE: + * agent/src/main/resources/io/perfmark/agent/third_party/asm/LICENSE (BSD style License) + * HOMEPAGE: + * https://asm.ow2.io/ \ No newline at end of file diff --git a/libs/arrow-spi/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/arrow-flight-rpc/licenses/slf4j-api-1.7.36.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/slf4j-api-1.7.36.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/slf4j-api-1.7.36.jar.sha1 diff --git a/libs/arrow-spi/licenses/slf4j-api-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/slf4j-api-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/slf4j-api-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/slf4j-api-LICENSE.txt diff --git a/plugins/arrow-flight-rpc/licenses/slf4j-api-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java b/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java new file mode 100644 index 0000000000000..bcad335c7a917 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight; + +import org.apache.arrow.flight.CallOptions; +import org.apache.arrow.flight.FlightClient; +import org.opensearch.arrow.flight.bootstrap.FlightClientManager; +import org.opensearch.arrow.flight.bootstrap.FlightService; +import org.opensearch.arrow.flight.bootstrap.FlightStreamPlugin; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.BeforeClass; + +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 5) +public class ArrowFlightServerIT extends OpenSearchIntegTestCase { + + private FlightClientManager flightClientManager; + + @BeforeClass + public static void setupFeatureFlags() { + FeatureFlagSetter.set(FeatureFlags.ARROW_STREAMS_SETTING.getKey()); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.singleton(FlightStreamPlugin.class); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + ensureGreen(); + Thread.sleep(1000); + FlightService flightService = internalCluster().getInstance(FlightService.class); + flightClientManager = flightService.getFlightClientManager(); + } + + public void testArrowFlightEndpoint() throws Exception { + for (DiscoveryNode node : getClusterState().nodes()) { + try (FlightClient flightClient = flightClientManager.getFlightClient(node.getId()).get()) { + assertNotNull(flightClient); + flightClient.handshake(CallOptions.timeout(5000L, TimeUnit.MILLISECONDS)); + } + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightClient.java b/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightClient.java new file mode 100644 index 0000000000000..0efafd370c651 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightClient.java @@ -0,0 +1,250 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.arrow.flight; + +import io.grpc.netty.GrpcSslContexts; +import io.grpc.netty.NettyChannelBuilder; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.ServerChannel; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.util.InsecureTrustManagerFactory; +import java.io.InputStream; +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import javax.net.ssl.SSLException; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.util.Preconditions; + +/** + * Clone of {@link org.apache.arrow.flight.FlightClient} to support setting SslContext and other settings like workerELG, + * executorService and channelType directly. It can be discarded once FlightClient.Builder supports setting SslContext directly. + * Note: This file needs to be re-cloned, and the above changes re-applied, on every version upgrade of arrow flight-core. + */ +public class OSFlightClient { + /** A builder for Flight clients.
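+ * <p>A minimal usage sketch, for illustration only (the allocator, location, SslContext and executor are assumed to be created by the caller): + * {@code OSFlightClient.builder().allocator(allocator).location(location).sslContext(sslCtx).executor(executorService).build()}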
*/ + public final static class Builder { + private BufferAllocator allocator; + private Location location; + private boolean forceTls = false; + private int maxInboundMessageSize = OSFlightServer.MAX_GRPC_MESSAGE_SIZE; + private InputStream trustedCertificates = null; + private InputStream clientCertificate = null; + private InputStream clientKey = null; + private String overrideHostname = null; + private List<FlightClientMiddleware.Factory> middleware = new ArrayList<>(); + private boolean verifyServer = true; + + private EventLoopGroup workerELG; + private ExecutorService executorService; + private Class channelType; + private SslContext sslContext; + + private Builder() {} + + Builder(BufferAllocator allocator, Location location) { + this.allocator = Preconditions.checkNotNull(allocator); + this.location = Preconditions.checkNotNull(location); + } + + /** Force the client to connect over TLS. */ + public Builder useTls() { + this.forceTls = true; + return this; + } + + /** Override the hostname checked for TLS. Use with caution in production. */ + public Builder overrideHostname(final String hostname) { + this.overrideHostname = hostname; + return this; + } + + /** Set the maximum inbound message size. */ + public Builder maxInboundMessageSize(int maxSize) { + Preconditions.checkArgument(maxSize > 0); + this.maxInboundMessageSize = maxSize; + return this; + } + + /** Set the trusted TLS certificates. */ + public Builder trustedCertificates(final InputStream stream) { + this.trustedCertificates = Preconditions.checkNotNull(stream); + return this; + } + + /** Set the client certificate and key to present for mutual TLS. */ + public Builder clientCertificate( + final InputStream clientCertificate, final InputStream clientKey) { + Preconditions.checkNotNull(clientKey); + this.clientCertificate = Preconditions.checkNotNull(clientCertificate); + this.clientKey = Preconditions.checkNotNull(clientKey); + return this; + } + + public Builder allocator(BufferAllocator allocator) { + this.allocator = Preconditions.checkNotNull(allocator); + return this; + } + + public Builder location(Location location) { + this.location = Preconditions.checkNotNull(location); + return this; + } + + public Builder intercept(FlightClientMiddleware.Factory factory) { + middleware.add(factory); + return this; + } + + public Builder verifyServer(boolean verifyServer) { + this.verifyServer = verifyServer; + return this; + } + + /** Create the client from this builder.
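+ * <p>In outline (a descriptive note, not part of the upstream javadoc): the gRPC channel builder is chosen from the location scheme (grpc, grpc+insecure, grpc+tls, or a domain socket), TLS settings and any SslContext override are applied, and the executor, channel type and event loop group configured on this builder are wired in before the underlying FlightClient is constructed.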
*/ + public FlightClient build() { + final NettyChannelBuilder builder; + + switch (location.getUri().getScheme()) { + case LocationSchemes.GRPC: + case LocationSchemes.GRPC_INSECURE: + case LocationSchemes.GRPC_TLS: + { + builder = NettyChannelBuilder.forAddress(location.toSocketAddress()); + break; + } + case LocationSchemes.GRPC_DOMAIN_SOCKET: + { + // The implementation is platform-specific, so we have to find the classes at runtime + builder = NettyChannelBuilder.forAddress(location.toSocketAddress()); + try { + try { + // Linux + builder.channelType( + Class.forName("io.netty.channel.epoll.EpollDomainSocketChannel") + .asSubclass(ServerChannel.class)); + final EventLoopGroup elg = + Class.forName("io.netty.channel.epoll.EpollEventLoopGroup") + .asSubclass(EventLoopGroup.class) + .getDeclaredConstructor() + .newInstance(); + builder.eventLoopGroup(elg); + } catch (ClassNotFoundException e) { + // BSD + builder.channelType( + Class.forName("io.netty.channel.kqueue.KQueueDomainSocketChannel") + .asSubclass(ServerChannel.class)); + final EventLoopGroup elg = + Class.forName("io.netty.channel.kqueue.KQueueEventLoopGroup") + .asSubclass(EventLoopGroup.class) + .getDeclaredConstructor() + .newInstance(); + builder.eventLoopGroup(elg); + } + } catch (ClassNotFoundException + | InstantiationException + | IllegalAccessException + | NoSuchMethodException + | InvocationTargetException e) { + throw new UnsupportedOperationException( + "Could not find suitable Netty native transport implementation for domain socket address."); + } + break; + } + default: + throw new IllegalArgumentException( + "Scheme is not supported: " + location.getUri().getScheme()); + } + + if (this.forceTls || LocationSchemes.GRPC_TLS.equals(location.getUri().getScheme())) { + builder.useTransportSecurity(); + + final boolean hasTrustedCerts = this.trustedCertificates != null; + final boolean hasKeyCertPair = this.clientCertificate != null && this.clientKey != null; + if (!this.verifyServer && (hasTrustedCerts || hasKeyCertPair)) { + throw new IllegalArgumentException( + "FlightClient has been configured to disable server verification, " + + "but certificate options have been specified."); + } + + if (sslContext != null) { + builder.sslContext(sslContext); + } else { + final SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient(); + + if (!this.verifyServer) { + sslContextBuilder.trustManager(InsecureTrustManagerFactory.INSTANCE); + } else if (this.trustedCertificates != null + || this.clientCertificate != null + || this.clientKey != null) { + if (this.trustedCertificates != null) { + sslContextBuilder.trustManager(this.trustedCertificates); + } + if (this.clientCertificate != null && this.clientKey != null) { + sslContextBuilder.keyManager(this.clientCertificate, this.clientKey); + } + } + try { + builder.sslContext(sslContextBuilder.build()); + } catch (SSLException e) { + throw new RuntimeException(e); + } + } + + if (this.overrideHostname != null) { + builder.overrideAuthority(this.overrideHostname); + } + } else { + builder.usePlaintext(); + } + + builder + .maxTraceEvents(0) + .maxInboundMessageSize(maxInboundMessageSize) + .maxInboundMetadataSize(maxInboundMessageSize) + .executor(executorService); + + if (channelType != null) { + builder.channelType(channelType); + } + + if (workerELG != null) { + builder.eventLoopGroup(workerELG); + } + + return new FlightClient(allocator, builder.build(), middleware); + } + + public Builder executor(ExecutorService executorService) { + this.executorService = 
executorService; + return this; + } + + public Builder channelType(Class channelType) { + this.channelType = channelType; + return this; + } + + public Builder eventLoopGroup(EventLoopGroup workerELG) { + this.workerELG = workerELG; + return this; + } + + public Builder sslContext(SslContext sslContext) { + this.sslContext = sslContext; + return this; + } + } + + public static Builder builder() { + return new Builder(); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightServer.java b/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightServer.java new file mode 100644 index 0000000000000..77e0e38314b44 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightServer.java @@ -0,0 +1,478 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.arrow.flight; + +import io.grpc.Server; +import io.grpc.ServerInterceptors; +import io.grpc.netty.GrpcSslContexts; +import io.grpc.netty.NettyServerBuilder; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.ServerChannel; +import io.netty.handler.ssl.ClientAuth; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.reflect.InvocationTargetException; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.function.Consumer; +import javax.net.ssl.SSLException; + +import org.apache.arrow.flight.auth.ServerAuthHandler; +import org.apache.arrow.flight.auth.ServerAuthInterceptor; +import org.apache.arrow.flight.auth2.Auth2Constants; +import org.apache.arrow.flight.auth2.CallHeaderAuthenticator; +import org.apache.arrow.flight.auth2.ServerCallHeaderAuthMiddleware; +import org.apache.arrow.flight.grpc.ServerBackpressureThresholdInterceptor; +import org.apache.arrow.flight.grpc.ServerInterceptorAdapter; +import org.apache.arrow.flight.grpc.ServerInterceptorAdapter.KeyFactory; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.util.Preconditions; + +/** + * Clone of {@link org.apache.arrow.flight.FlightServer} to support setting SslContext. It can be discarded once FlightServer.Builder supports setting SslContext directly. + *
<p>
+ * It changes {@link org.apache.arrow.flight.FlightServer.Builder} to allow a hook to configure the NettyServerBuilder. + */ +@SuppressWarnings("removal") +public class OSFlightServer { + /** The maximum size of an individual gRPC message. This effectively disables the limit. */ + static final int MAX_GRPC_MESSAGE_SIZE = Integer.MAX_VALUE; + /** The default number of bytes that can be queued on an output stream before blocking. */ + static final int DEFAULT_BACKPRESSURE_THRESHOLD = 10 * 1024 * 1024; // 10MB + + private static final MethodHandle FLIGHT_SERVER_CTOR_MH; + + static { + FLIGHT_SERVER_CTOR_MH = AccessController.doPrivileged((PrivilegedAction<MethodHandle>) () -> { + try { + return MethodHandles + .privateLookupIn(FlightServer.class, MethodHandles.lookup()) + .findConstructor(FlightServer.class, MethodType.methodType(void.class, Location.class, Server.class, ExecutorService.class)); + } catch (final NoSuchMethodException | IllegalAccessException ex) { + throw new IllegalStateException("Unable to find the FlightServer constructor to invoke", ex); + }} + ); + } + + /** A builder for Flight servers. */ + public final static class Builder { + private BufferAllocator allocator; + private Location location; + private FlightProducer producer; + private final Map<String, Object> builderOptions; + private ServerAuthHandler authHandler = ServerAuthHandler.NO_OP; + private CallHeaderAuthenticator headerAuthenticator = CallHeaderAuthenticator.NO_OP; + private ExecutorService executor = null; + private int maxInboundMessageSize = MAX_GRPC_MESSAGE_SIZE; + private int maxHeaderListSize = MAX_GRPC_MESSAGE_SIZE; + private int backpressureThreshold = DEFAULT_BACKPRESSURE_THRESHOLD; + private InputStream certChain; + private InputStream key; + private InputStream mTlsCACert; + private SslContext sslContext; + private final List<KeyFactory<?>> interceptors; + // Keep track of inserted interceptors + private final Set<String> interceptorKeys; + + Builder() { + builderOptions = new HashMap<>(); + interceptors = new ArrayList<>(); + interceptorKeys = new HashSet<>(); + } + + Builder(BufferAllocator allocator, Location location, FlightProducer producer) { + this(); + this.allocator = Preconditions.checkNotNull(allocator); + this.location = Preconditions.checkNotNull(location); + this.producer = Preconditions.checkNotNull(producer); + } + + /** Create the server for this builder. */ + @SuppressWarnings("unchecked") + public FlightServer build() { + // Add the auth middleware if applicable.
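+ // The auth middleware is keyed by the Authorization header and added only when a header + // authenticator is configured; the plain header middleware below is registered unconditionally.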
+ if (headerAuthenticator != CallHeaderAuthenticator.NO_OP) { + this.middleware( + FlightServerMiddleware.Key.of(Auth2Constants.AUTHORIZATION_HEADER), + new ServerCallHeaderAuthMiddleware.Factory(headerAuthenticator)); + } + + this.middleware(FlightConstants.HEADER_KEY, new ServerHeaderMiddleware.Factory()); + + final NettyServerBuilder builder; + switch (location.getUri().getScheme()) { + case LocationSchemes.GRPC_DOMAIN_SOCKET: + { + // The implementation is platform-specific, so we have to find the classes at runtime + builder = NettyServerBuilder.forAddress(location.toSocketAddress()); + try { + try { + // Linux + builder.channelType( + Class.forName("io.netty.channel.epoll.EpollServerDomainSocketChannel") + .asSubclass(ServerChannel.class)); + final EventLoopGroup elg = + Class.forName("io.netty.channel.epoll.EpollEventLoopGroup") + .asSubclass(EventLoopGroup.class) + .getConstructor() + .newInstance(); + builder.bossEventLoopGroup(elg).workerEventLoopGroup(elg); + } catch (ClassNotFoundException e) { + // BSD + builder.channelType( + Class.forName("io.netty.channel.kqueue.KQueueServerDomainSocketChannel") + .asSubclass(ServerChannel.class)); + final EventLoopGroup elg = + Class.forName("io.netty.channel.kqueue.KQueueEventLoopGroup") + .asSubclass(EventLoopGroup.class) + .getConstructor() + .newInstance(); + builder.bossEventLoopGroup(elg).workerEventLoopGroup(elg); + } + } catch (ClassNotFoundException + | InstantiationException + | IllegalAccessException + | NoSuchMethodException + | InvocationTargetException e) { + throw new UnsupportedOperationException( + "Could not find suitable Netty native transport implementation for domain socket address."); + } + break; + } + case LocationSchemes.GRPC: + case LocationSchemes.GRPC_INSECURE: + { + builder = NettyServerBuilder.forAddress(location.toSocketAddress()); + break; + } + case LocationSchemes.GRPC_TLS: + { + if (certChain == null) { + throw new IllegalArgumentException( + "Must provide a certificate and key to serve gRPC over TLS"); + } + builder = NettyServerBuilder.forAddress(location.toSocketAddress()); + break; + } + default: + throw new IllegalArgumentException( + "Scheme is not supported: " + location.getUri().getScheme()); + } + + if (certChain != null && sslContext == null) { + SslContextBuilder sslContextBuilder = GrpcSslContexts.forServer(certChain, key); + + if (mTlsCACert != null) { + sslContextBuilder.clientAuth(ClientAuth.REQUIRE).trustManager(mTlsCACert); + } + try { + sslContext = sslContextBuilder.build(); + } catch (SSLException e) { + throw new RuntimeException(e); + } finally { + closeMTlsCACert(); + closeCertChain(); + closeKey(); + } + + builder.sslContext(sslContext); + } else if (sslContext != null) { + builder.sslContext(sslContext); + } + + // Share one executor between the gRPC service, DoPut, and Handshake + final ExecutorService exec; + // We only want to have FlightServer close the gRPC executor if we created it here. We should + // not close + // user-supplied executors. 
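+ // Unlike upstream FlightServer, this clone never creates a default executor: grpcExecutor stays + // null so the constructed FlightServer will not shut down the caller-supplied executor on close.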
+ final ExecutorService grpcExecutor; + if (executor != null) { + exec = executor; + grpcExecutor = null; + } else { + throw new IllegalStateException("GRPC executor must be passed to start Flight server."); + } + + final FlightBindingService flightService = + new FlightBindingService(allocator, producer, authHandler, exec); + builder + .executor(exec) + .maxInboundMessageSize(maxInboundMessageSize) + .maxInboundMetadataSize(maxHeaderListSize) + .addService( + ServerInterceptors.intercept( + flightService, + new ServerBackpressureThresholdInterceptor(backpressureThreshold), + new ServerAuthInterceptor(authHandler))); + + // Allow hooking into the gRPC builder. This is not guaranteed to be available on all Arrow + // versions or Flight implementations. + builderOptions.computeIfPresent( + "grpc.builderConsumer", + (key, builderConsumer) -> { + final Consumer<NettyServerBuilder> consumer = + (Consumer<NettyServerBuilder>) builderConsumer; + consumer.accept(builder); + return null; + }); + + // Allow explicitly setting some Netty-specific options + builderOptions.computeIfPresent( + "netty.channelType", + (key, channelType) -> { + builder.channelType((Class<? extends ServerChannel>) channelType); + return null; + }); + builderOptions.computeIfPresent( + "netty.bossEventLoopGroup", + (key, elg) -> { + builder.bossEventLoopGroup((EventLoopGroup) elg); + return null; + }); + builderOptions.computeIfPresent( + "netty.workerEventLoopGroup", + (key, elg) -> { + builder.workerEventLoopGroup((EventLoopGroup) elg); + return null; + }); + + builder.intercept(new ServerInterceptorAdapter(interceptors)); + + try { + return (FlightServer) FLIGHT_SERVER_CTOR_MH.invoke(location, builder.build(), grpcExecutor); + } catch (final Throwable ex) { + throw new IllegalStateException("Unable to instantiate FlightServer", ex); + } + } + + public Builder channelType(Class<? extends ServerChannel> channelType) { + builderOptions.put("netty.channelType", channelType); + return this; + } + + public Builder workerEventLoopGroup(EventLoopGroup workerELG) { + builderOptions.put("netty.workerEventLoopGroup", workerELG); + return this; + } + + public Builder bossEventLoopGroup(EventLoopGroup bossELG) { + builderOptions.put("netty.bossEventLoopGroup", bossELG); + return this; + } + + public Builder setMaxHeaderListSize(int maxHeaderListSize) { + this.maxHeaderListSize = maxHeaderListSize; + return this; + } + + /** + * Set the maximum size of a message. Defaults to "unlimited", depending on the underlying + * transport. + */ + public Builder maxInboundMessageSize(int maxMessageSize) { + this.maxInboundMessageSize = maxMessageSize; + return this; + } + + /** + * Set the number of bytes that may be queued on a server output stream before writes are + * blocked. + */ + public Builder backpressureThreshold(int backpressureThreshold) { + Preconditions.checkArgument(backpressureThreshold > 0); + this.backpressureThreshold = backpressureThreshold; + return this; + } + + /** + * A small utility function to ensure that InputStream attributes are closed if they are not + * null. + * + * @param stream The InputStream to close (if it is not null). + */ + private void closeInputStreamIfNotNull(InputStream stream) { + if (stream != null) { + try { + stream.close(); + } catch (IOException expected) { + // stream closes gracefully, doesn't expect an exception. + } + } + } + + /** + * A small utility function to ensure that the certChain attribute is closed if it is not null. + * It then sets the attribute to null.
+ */ + private void closeCertChain() { + closeInputStreamIfNotNull(certChain); + certChain = null; + } + + /** + * A small utility function to ensure that the key attribute is closed if it is not null. It + * then sets the attribute to null. + */ + private void closeKey() { + closeInputStreamIfNotNull(key); + key = null; + } + + /** + * A small utility function to ensure that the mTlsCACert attribute is closed if it is not null. + * It then sets the attribute to null. + */ + private void closeMTlsCACert() { + closeInputStreamIfNotNull(mTlsCACert); + mTlsCACert = null; + } + + /** + * Enable TLS on the server. + * + * @param certChain The certificate chain to use. + * @param key The private key to use. + */ + public Builder useTls(final File certChain, final File key) throws IOException { + closeCertChain(); + this.certChain = new FileInputStream(certChain); + + closeKey(); + this.key = new FileInputStream(key); + + return this; + } + + /** + * Enable Client Verification via mTLS on the server. + * + * @param mTlsCACert The CA certificate to use for verifying clients. + */ + public Builder useMTlsClientVerification(final File mTlsCACert) throws IOException { + closeMTlsCACert(); + this.mTlsCACert = new FileInputStream(mTlsCACert); + return this; + } + + /** + * Enable TLS on the server. + * + * @param certChain The certificate chain to use. + * @param key The private key to use. + */ + public Builder useTls(final InputStream certChain, final InputStream key) throws IOException { + closeCertChain(); + this.certChain = certChain; + + closeKey(); + this.key = key; + + return this; + } + + /** + * Enable mTLS on the server. + * + * @param mTlsCACert The CA certificate to use for verifying clients. + */ + public Builder useMTlsClientVerification(final InputStream mTlsCACert) throws IOException { + closeMTlsCACert(); + this.mTlsCACert = mTlsCACert; + return this; + } + + /** + * Set the executor used by the server. + * + *
<p>
Flight will NOT take ownership of the executor. The application must clean it up if one is + * provided. (Unlike upstream Flight, this clone does not create a default executor; build() fails if none is provided.) + */ + public Builder executor(ExecutorService executor) { + this.executor = executor; + return this; + } + + /** Set the authentication handler. */ + public Builder authHandler(ServerAuthHandler authHandler) { + this.authHandler = authHandler; + return this; + } + + /** Set the header-based authentication mechanism. */ + public Builder headerAuthenticator(CallHeaderAuthenticator headerAuthenticator) { + this.headerAuthenticator = headerAuthenticator; + return this; + } + + /** Provide a transport-specific option. Not guaranteed to have any effect. */ + public Builder transportHint(final String key, Object option) { + builderOptions.put(key, option); + return this; + } + + /** + * Add a Flight middleware component to inspect and modify requests to this service. + * + * @param key An identifier for this middleware component. Service implementations can retrieve + * the middleware instance for the current call using {@link + * org.apache.arrow.flight.FlightProducer.CallContext}. + * @param factory A factory for the middleware. + * @param <T> The middleware type. + * @throws IllegalArgumentException if the key already exists + */ + public <T extends FlightServerMiddleware> Builder middleware( + final FlightServerMiddleware.Key<T> key, final FlightServerMiddleware.Factory<T> factory) { + if (interceptorKeys.contains(key.key)) { + throw new IllegalArgumentException("Key already exists: " + key.key); + } + interceptors.add(new KeyFactory<>(key, factory)); + interceptorKeys.add(key.key); + return this; + } + + public Builder allocator(BufferAllocator allocator) { + this.allocator = Preconditions.checkNotNull(allocator); + return this; + } + + public Builder location(Location location) { + this.location = Preconditions.checkNotNull(location); + return this; + } + + public Builder producer(FlightProducer producer) { + this.producer = Preconditions.checkNotNull(producer); + return this; + } + + public Builder sslContext(SslContext sslContext) { + this.sslContext = sslContext; + return this; + } + } + + public static Builder builder() { + return new Builder(); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/package-info.java b/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/package-info.java new file mode 100644 index 0000000000000..789a88a2d1159 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Clone of FlightServer and FlightClient due to package-private access of + * certain configurations. + */ +package org.apache.arrow.flight; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/FlightServerInfoAction.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/FlightServerInfoAction.java new file mode 100644 index 0000000000000..529bee72c708d --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/FlightServerInfoAction.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ +package org.opensearch.arrow.flight.api; + +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.transport.client.node.NodeClient; + +import java.util.List; + +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * It handles GET requests for retrieving Flight server information. + */ +public class FlightServerInfoAction extends BaseRestHandler { + + /** + * Constructor for FlightServerInfoAction. + */ + public FlightServerInfoAction() {} + + /** + * Returns the name of the action. + * @return The name of the action. + */ + @Override + public String getName() { + return "flight_server_info_action"; + } + + /** + * Returns the list of routes for the action. + * @return The list of routes for the action. + */ + @Override + public List routes() { + return List.of(new Route(GET, "/_flight/info"), new Route(GET, "/_flight/info/{nodeId}")); + } + + /** + * Prepares the request for the action. + * @param request The REST request. + * @param client The node client. + * @return The rest channel consumer. + */ + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + String nodeId = request.param("nodeId"); + if (nodeId != null) { + // Query specific node + NodesFlightInfoRequest nodesRequest = new NodesFlightInfoRequest(nodeId); + return channel -> client.execute(NodesFlightInfoAction.INSTANCE, nodesRequest, new RestToXContentListener<>(channel)); + } else { + NodesFlightInfoRequest nodesRequest = new NodesFlightInfoRequest(); + return channel -> client.execute(NodesFlightInfoAction.INSTANCE, nodesRequest, new RestToXContentListener<>(channel)); + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodeFlightInfo.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodeFlightInfo.java new file mode 100644 index 0000000000000..e804b0c518523 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodeFlightInfo.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Represents the response for a node's flight information. + */ +public class NodeFlightInfo extends BaseNodeResponse implements ToXContentObject { + private final BoundTransportAddress boundAddress; + + /** + * Constructor for NodeFlightInfo. + * @param in The stream input to read from. + * @throws IOException If an I/O error occurs. + */ + public NodeFlightInfo(StreamInput in) throws IOException { + super(in); + boundAddress = new BoundTransportAddress(in); + } + + /** + * Constructor for NodeFlightInfo. + * @param node The discovery node. + * @param boundAddress The bound transport address. 
+ */ + public NodeFlightInfo(DiscoveryNode node, BoundTransportAddress boundAddress) { + super(node); + this.boundAddress = boundAddress; + } + + /** + * Writes the node flight information to the stream. + * @param out The stream output to write to. + * @throws IOException If an I/O error occurs. + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + boundAddress.writeTo(out); + } + + /** + * Returns the bound transport address. + * @return The bound transport address. + */ + public BoundTransportAddress getBoundAddress() { + return boundAddress; + } + + /** + * Converts the node flight information to XContent. + * @param builder The XContent builder. + * @param params The parameters for the XContent conversion. + * @return The XContent builder. + * @throws IOException If an I/O error occurs. + */ + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.startObject("flight_server"); + + builder.startArray("bound_addresses"); + for (TransportAddress address : boundAddress.boundAddresses()) { + builder.startObject(); + builder.field("host", address.address().getHostString()); + builder.field("port", address.address().getPort()); + builder.endObject(); + } + builder.endArray(); + + TransportAddress publishAddress = boundAddress.publishAddress(); + builder.startObject("publish_address"); + builder.field("host", publishAddress.address().getHostString()); + builder.field("port", publishAddress.address().getPort()); + builder.endObject(); + + builder.endObject(); + builder.endObject(); + return builder; + } + +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoAction.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoAction.java new file mode 100644 index 0000000000000..3148c58a1509d --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoAction.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.action.ActionType; + +/** + * Action to retrieve flight info from nodes + */ +public class NodesFlightInfoAction extends ActionType { + /** + * Singleton instance of NodesFlightInfoAction. + */ + public static final NodesFlightInfoAction INSTANCE = new NodesFlightInfoAction(); + /** + * Name of this action. + */ + public static final String NAME = "cluster:admin/flight/info"; + + NodesFlightInfoAction() { + super(NAME, NodesFlightInfoResponse::new); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequest.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequest.java new file mode 100644 index 0000000000000..1b707f461819c --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequest.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; + +/** + * Flight Info Request + */ +public class NodesFlightInfoRequest extends BaseNodesRequest { + + /** + * Constructor for NodesFlightInfoRequest + * @param in StreamInput + * @throws IOException If an I/O error occurs + */ + public NodesFlightInfoRequest(StreamInput in) throws IOException { + super(in); + } + + /** + * Constructor for NodesFlightInfoRequest + * @param nodesIds String array of node IDs + */ + public NodesFlightInfoRequest(String... nodesIds) { + super(nodesIds); + } + + /** + * Writes the request to the given StreamOutput + */ + public static class NodeFlightInfoRequest extends TransportRequest { + NodesFlightInfoRequest request; + + /** + * Constructor for NodeFlightInfoRequest + * @param in StreamInput to read from + * @throws IOException If an I/O error occurs + */ + public NodeFlightInfoRequest(StreamInput in) throws IOException { + super(in); + } + + /** + * Constructor for NodeFlightInfoRequest + * @param request NodesFlightInfoRequest + */ + public NodeFlightInfoRequest(NodesFlightInfoRequest request) { + this.request = request; + } + } + + /** + * Writes the request to the given StreamOutput + * @param out StreamOutput to write to + * @throws IOException If an I/O error occurs + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponse.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponse.java new file mode 100644 index 0000000000000..721cd631924bd --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponse.java @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; + +/** + * Represents the response for nodes flight information. + */ +public class NodesFlightInfoResponse extends BaseNodesResponse implements ToXContentObject { + /** + * Constructs a new NodesFlightInfoResponse instance. + * + * @param in The stream input to read from. + * @throws IOException If an I/O error occurs. + */ + public NodesFlightInfoResponse(StreamInput in) throws IOException { + super(in); + } + + /** + * Constructs a new NodesFlightInfoResponse instance. + * + * @param clusterName The cluster name. + * @param nodes The list of node flight information. + * @param failures The list of failed node exceptions. 
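A minimal sketch of driving the nodes action above from client code; `client`, `nodeId`, and `logger` are assumed to exist in scope. The same pattern is used later in this patch by FlightClientManager#requestNodeLocation.

    NodesFlightInfoRequest request = new NodesFlightInfoRequest(nodeId); // empty varargs would target all nodes
    client.execute(NodesFlightInfoAction.INSTANCE, request, new ActionListener<NodesFlightInfoResponse>() {
        @Override
        public void onResponse(NodesFlightInfoResponse response) {
            NodeFlightInfo info = response.getNodesMap().get(nodeId);
            if (info != null) {
                TransportAddress publish = info.getBoundAddress().publishAddress();
                logger.info("Flight endpoint for [{}]: {}:{}", nodeId, publish.getAddress(), publish.address().getPort());
            }
        }

        @Override
        public void onFailure(Exception e) {
            logger.warn("Failed to fetch flight info for [{}]", nodeId, e);
        }
    });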
+ */ + public NodesFlightInfoResponse(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); + } + + /** + * Reads the nodes from the given stream input. + * + * @param in The stream input to read from. + * @return The list of node flight information. + * @throws IOException If an I/O error occurs. + */ + @Override + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readList(NodeFlightInfo::new); + } + + /** + * Writes the nodes to the given stream output. + * + * @param out The stream output to write to. + * @param nodes The list of node flight information. + * @throws IOException If an I/O error occurs. + */ + @Override + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeList(nodes); + } + + /** + * Converts the nodes flight information response to XContent. + * @param builder The XContent builder. + * @param params The parameters for the XContent conversion. + * @return The XContent builder. + * @throws IOException If an I/O error occurs. + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + + builder.startObject(); + builder.startObject("_nodes"); + builder.field("total", getNodes().size()); + builder.field("successful", getNodes().size()); + builder.field("failed", failures().size()); + builder.endObject(); + + builder.field("cluster_name", getClusterName().value()); + + builder.startObject("nodes"); + for (NodeFlightInfo nodeInfo : getNodes()) { + builder.field(nodeInfo.getNode().getId()); + nodeInfo.toXContent(builder, params); + } + builder.endObject(); + + if (!failures().isEmpty()) { + builder.startArray("failures"); + for (FailedNodeException failure : failures()) { + builder.startObject(); + builder.field("node_id", failure.nodeId()); + builder.field("reason", failure.getMessage()); + builder.endObject(); + } + builder.endArray(); + } + + builder.endObject(); + return builder; + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoAction.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoAction.java new file mode 100644 index 0000000000000..d4722e20d1f84 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoAction.java @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.arrow.flight.bootstrap.FlightService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +/** + * Transport action for getting flight information from nodes + */ +public class TransportNodesFlightInfoAction extends TransportNodesAction< + NodesFlightInfoRequest, + NodesFlightInfoResponse, + NodesFlightInfoRequest.NodeFlightInfoRequest, + NodeFlightInfo> { + + private final FlightService flightService; + + /** + * Constructor for TransportNodesFlightInfoAction + * @param settings The settings for the action + * @param threadPool The thread pool for the action + * @param clusterService The cluster service for the action + * @param transportService The transport service for the action + * @param actionFilters The action filters for the action + * @param flightService The flight service for the action + */ + @Inject + public TransportNodesFlightInfoAction( + Settings settings, + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + FlightService flightService + ) { + super( + NodesFlightInfoAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + NodesFlightInfoRequest::new, + NodesFlightInfoRequest.NodeFlightInfoRequest::new, + ThreadPool.Names.MANAGEMENT, + NodeFlightInfo.class + ); + this.flightService = flightService; + } + + /** + * Creates a new response object for the action. + * @param request The associated request. + * @param nodeFlightInfos All successful node-level responses. + * @param failures All node-level failures. + * @return The response object. + */ + @Override + protected NodesFlightInfoResponse newResponse( + NodesFlightInfoRequest request, + List nodeFlightInfos, + List failures + ) { + return new NodesFlightInfoResponse(clusterService.getClusterName(), nodeFlightInfos, failures); + } + + /** + * Creates a new request object for a node. + * @param request The associated request. + * @return The request object. + */ + @Override + protected NodesFlightInfoRequest.NodeFlightInfoRequest newNodeRequest(NodesFlightInfoRequest request) { + return new NodesFlightInfoRequest.NodeFlightInfoRequest(request); + } + + /** + * Creates a new response object for a node. + * @param in The stream input to read from. + * @return The response object. + */ + @Override + protected NodeFlightInfo newNodeResponse(StreamInput in) throws IOException { + return new NodeFlightInfo(in); + } + + /** + * Creates a new response object for a node. + * @param request The associated request. + * @return The response object. 
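Putting the pieces together, a GET /_flight/info response rendered by NodesFlightInfoResponse#toXContent and NodeFlightInfo#toXContent above should look roughly like this; node id, host, and port are illustrative values, not output from a real cluster:

    {
      "_nodes": { "total": 1, "successful": 1, "failed": 0 },
      "cluster_name": "opensearch",
      "nodes": {
        "WgeNb8qYTn2aBBzs6WX3Iw": {
          "flight_server": {
            "bound_addresses": [ { "host": "127.0.0.1", "port": 47470 } ],
            "publish_address": { "host": "127.0.0.1", "port": 47470 }
          }
        }
      }
    }

The "failures" array is only emitted when at least one node-level request failed.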
+ */ + @Override + protected NodeFlightInfo nodeOperation(NodesFlightInfoRequest.NodeFlightInfoRequest request) { + return new NodeFlightInfo(clusterService.localNode(), flightService.getBoundAddress()); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/package-info.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/package-info.java new file mode 100644 index 0000000000000..d89ec87f9a51e --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Action to retrieve flight info from nodes + */ +package org.opensearch.arrow.flight.api; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightClientManager.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightClientManager.java new file mode 100644 index 0000000000000..a81033f580a03 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightClientManager.java @@ -0,0 +1,252 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.arrow.flight.bootstrap; + +import org.apache.arrow.flight.FlightClient; +import org.apache.arrow.flight.Location; +import org.apache.arrow.flight.OSFlightClient; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.util.VisibleForTesting; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.Version; +import org.opensearch.arrow.flight.api.NodeFlightInfo; +import org.opensearch.arrow.flight.api.NodesFlightInfoAction; +import org.opensearch.arrow.flight.api.NodesFlightInfoRequest; +import org.opensearch.arrow.flight.api.NodesFlightInfoResponse; +import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterStateListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Nullable; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.client.Client; + +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + +import io.netty.channel.EventLoopGroup; + +import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS_SETTING; + +/** + * Manages Flight client connections to OpenSearch nodes in a cluster. + * This class maintains a pool of Flight clients for internode communication, + * handles client lifecycle, and responds to cluster state changes. + * + *
The manager implements ClusterStateListener to automatically update + * client connections when nodes join or leave the cluster.
+ */ +public class FlightClientManager implements ClusterStateListener, AutoCloseable { + private static final Version MIN_SUPPORTED_VERSION = Version.V_3_0_0; + private static final Logger logger = LogManager.getLogger(FlightClientManager.class); + static final int LOCATION_TIMEOUT_MS = 1000; + private final ExecutorService grpcExecutor; + private final ClientConfiguration clientConfig; + private final Map flightClients = new ConcurrentHashMap<>(); + private final Client client; + + /** + * Creates a new FlightClientManager instance. + * + * @param allocator Supplier for buffer allocation + * @param clusterService Service for cluster state management + * @param sslContextProvider Provider for SSL/TLS context configuration + * @param elg Event loop group for network operations + * @param threadPool Thread pool for executing tasks asynchronously + * @param client OpenSearch client + */ + public FlightClientManager( + BufferAllocator allocator, + ClusterService clusterService, + @Nullable SslContextProvider sslContextProvider, + EventLoopGroup elg, + ThreadPool threadPool, + Client client + ) { + grpcExecutor = threadPool.executor(ServerConfig.FLIGHT_CLIENT_THREAD_POOL_NAME); + this.clientConfig = new ClientConfiguration( + Objects.requireNonNull(allocator, "BufferAllocator cannot be null"), + Objects.requireNonNull(clusterService, "ClusterService cannot be null"), + sslContextProvider, + Objects.requireNonNull(elg, "EventLoopGroup cannot be null"), + Objects.requireNonNull(grpcExecutor, "ExecutorService cannot be null") + ); + this.client = Objects.requireNonNull(client, "Client cannot be null"); + clusterService.addListener(this); + } + + /** + * Returns a Flight client for a given node ID. + * + * @param nodeId The ID of the node for which to retrieve the Flight client + * @return An OpenSearchFlightClient instance for the specified node + */ + public Optional getFlightClient(String nodeId) { + return Optional.ofNullable(flightClients.get(nodeId)); + } + + /** + * Builds a client for a given nodeId in asynchronous manner + * @param nodeId nodeId of the node to build client for + */ + public void buildClientAsync(String nodeId) { + CompletableFuture locationFuture = new CompletableFuture<>(); + locationFuture.thenAccept(location -> { + DiscoveryNode node = getNodeFromClusterState(nodeId); + buildClientAndAddToPool(location, node); + }).exceptionally(throwable -> { + logger.error("Failed to get Flight server location for node: [{}] {}", nodeId, throwable); + throw new RuntimeException(throwable); + }); + requestNodeLocation(nodeId, locationFuture); + } + + private void buildClientAndAddToPool(Location location, DiscoveryNode node) { + if (!isValidNode(node)) { + logger.warn( + "Unable to build FlightClient for node [{}] with role [{}] on version [{}]", + node.getId(), + node.getRoles(), + node.getVersion() + ); + return; + } + flightClients.computeIfAbsent(node.getId(), key -> buildClient(location)); + } + + private void requestNodeLocation(String nodeId, CompletableFuture future) { + NodesFlightInfoRequest request = new NodesFlightInfoRequest(nodeId); + try { + + client.execute(NodesFlightInfoAction.INSTANCE, request, new ActionListener<>() { + @Override + public void onResponse(NodesFlightInfoResponse response) { + NodeFlightInfo nodeInfo = response.getNodesMap().get(nodeId); + if (nodeInfo != null) { + TransportAddress publishAddress = nodeInfo.getBoundAddress().publishAddress(); + String address = publishAddress.getAddress(); + int flightPort = publishAddress.address().getPort(); + 
Location location = clientConfig.sslContextProvider != null + ? Location.forGrpcTls(address, flightPort) + : Location.forGrpcInsecure(address, flightPort); + + future.complete(location); + } else { + future.completeExceptionally(new IllegalStateException("No Flight info received for node: [" + nodeId + "]")); + } + } + + @Override + public void onFailure(Exception e) { + future.completeExceptionally(e); + logger.error("Failed to get Flight server info for node: [{}] {}", nodeId, e); + } + }); + } catch (final Exception ex) { + future.completeExceptionally(ex); + } + } + + private FlightClient buildClient(Location location) { + return OSFlightClient.builder() + .allocator(clientConfig.allocator) + .location(location) + .channelType(ServerConfig.clientChannelType()) + .eventLoopGroup(clientConfig.workerELG) + .sslContext(clientConfig.sslContextProvider != null ? clientConfig.sslContextProvider.getClientSslContext() : null) + .executor(clientConfig.grpcExecutor) + .build(); + } + + private DiscoveryNode getNodeFromClusterState(String nodeId) { + return Objects.requireNonNull(clientConfig.clusterService).state().nodes().get(nodeId); + } + + /** + * Closes the FlightClientManager and all associated Flight clients. + */ + @Override + public void close() throws Exception { + for (FlightClient flightClient : flightClients.values()) { + flightClient.close(); + } + flightClients.clear(); + grpcExecutor.shutdown(); + grpcExecutor.awaitTermination(5, TimeUnit.SECONDS); + clientConfig.clusterService.removeListener(this); + } + + /** + * Returns the ID of the local node in the cluster. + * + * @return String representing the local node ID + */ + public String getLocalNodeId() { + return Objects.requireNonNull(clientConfig.clusterService).state().nodes().getLocalNodeId(); + } + + /** + * Handles cluster state changes by updating node locations and managing client connections. 
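A short usage sketch for the client pool above; `clientManager` and `nodeId` are assumed. Note that buildClientAsync is fire-and-forget, so a client may not be available immediately after the call returns.

    Optional<FlightClient> maybeClient = clientManager.getFlightClient(nodeId);
    if (maybeClient.isPresent()) {
        FlightClient flightClient = maybeClient.get();
        // ... issue Flight RPCs with flightClient
    } else {
        // Resolves the node's Flight location via NodesFlightInfoAction, then pools a client.
        clientManager.buildClientAsync(nodeId);
    }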
+ * + * @param event The ClusterChangedEvent containing information about the cluster state change + */ + @Override + public void clusterChanged(ClusterChangedEvent event) { + if (event.nodesChanged()) { + DiscoveryNodes nodes = event.state().nodes(); + flightClients.keySet().removeIf(nodeId -> !nodes.nodeExists(nodeId)); + for (DiscoveryNode node : nodes) { + if (!flightClients.containsKey(node.getId()) && isValidNode(node)) { + buildClientAsync(node.getId()); + } + } + } + } + + private static boolean isValidNode(DiscoveryNode node) { + return node != null && !node.getVersion().before(MIN_SUPPORTED_VERSION) && FeatureFlags.isEnabled(ARROW_STREAMS_SETTING); + } + + private Set getCurrentClusterNodes() { + return Objects.requireNonNull(clientConfig.clusterService).state().nodes().getNodes().keySet(); + } + + @VisibleForTesting + Map getFlightClients() { + return flightClients; + } + + private record ClientConfiguration(BufferAllocator allocator, ClusterService clusterService, SslContextProvider sslContextProvider, + EventLoopGroup workerELG, ExecutorService grpcExecutor) { + private ClientConfiguration( + BufferAllocator allocator, + ClusterService clusterService, + @Nullable SslContextProvider sslContextProvider, + EventLoopGroup workerELG, + ExecutorService grpcExecutor + ) { + this.allocator = allocator; + this.clusterService = clusterService; + this.sslContextProvider = sslContextProvider; + this.workerELG = workerELG; + this.grpcExecutor = grpcExecutor; + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java new file mode 100644 index 0000000000000..7735fc3df73e0 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java @@ -0,0 +1,170 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.bootstrap; + +import org.apache.arrow.flight.NoOpFlightProducer; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.util.AutoCloseables; +import org.apache.arrow.util.VisibleForTesting; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.arrow.flight.bootstrap.tls.DefaultSslContextProvider; +import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; +import org.opensearch.arrow.spi.StreamManager; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.client.Client; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Objects; + +/** + * FlightService manages the Arrow Flight server and client for OpenSearch. + * It handles the initialization, startup, and shutdown of the Flight server and client, + * as well as managing the stream operations through a FlightStreamManager. 
+ */ +public class FlightService extends NetworkPlugin.AuxTransport { + private static final Logger logger = LogManager.getLogger(FlightService.class); + private final ServerComponents serverComponents; + private StreamManager streamManager; + private Client client; + private FlightClientManager clientManager; + private SecureTransportSettingsProvider secureTransportSettingsProvider; + private BufferAllocator allocator; + private ThreadPool threadPool; + + /** + * Constructor for FlightService. + * @param settings The settings for the FlightService. + */ + public FlightService(Settings settings) { + Objects.requireNonNull(settings, "Settings cannot be null"); + try { + ServerConfig.init(settings); + } catch (Exception e) { + throw new RuntimeException("Failed to initialize Arrow Flight server", e); + } + this.serverComponents = new ServerComponents(settings); + } + + void setClusterService(ClusterService clusterService) { + serverComponents.setClusterService(Objects.requireNonNull(clusterService, "ClusterService cannot be null")); + } + + void setNetworkService(NetworkService networkService) { + serverComponents.setNetworkService(Objects.requireNonNull(networkService, "NetworkService cannot be null")); + } + + void setThreadPool(ThreadPool threadPool) { + this.threadPool = Objects.requireNonNull(threadPool, "ThreadPool cannot be null"); + serverComponents.setThreadPool(threadPool); + } + + void setClient(Client client) { + this.client = client; + } + + void setSecureTransportSettingsProvider(SecureTransportSettingsProvider secureTransportSettingsProvider) { + this.secureTransportSettingsProvider = secureTransportSettingsProvider; + } + + /** + * Starts the FlightService by initializing the stream manager. + */ + @SuppressWarnings("removal") + @Override + protected void doStart() { + try { + allocator = AccessController.doPrivileged((PrivilegedAction) () -> new RootAllocator(Integer.MAX_VALUE)); + serverComponents.setAllocator(allocator); + SslContextProvider sslContextProvider = ServerConfig.isSslEnabled() + ? new DefaultSslContextProvider(secureTransportSettingsProvider) + : null; + serverComponents.setSslContextProvider(sslContextProvider); + serverComponents.initComponents(); + clientManager = new FlightClientManager( + allocator, // sharing the same allocator between server and client + serverComponents.clusterService, + sslContextProvider, + serverComponents.workerEventLoopGroup, // sharing the same worker ELG between server and client + threadPool, + client + ); + initializeStreamManager(clientManager); + serverComponents.setFlightProducer(new NoOpFlightProducer()); + serverComponents.start(); + + } catch (Exception e) { + logger.error("Failed to start Flight server", e); + doClose(); + throw new RuntimeException("Failed to start Flight server", e); + } + } + + /** + * Retrieves the FlightClientManager used by the FlightService. + * @return The FlightClientManager instance. + */ + public FlightClientManager getFlightClientManager() { + return clientManager; + } + + /** + * Retrieves the StreamManager used by the FlightService. + * @return The StreamManager instance. + */ + public StreamManager getStreamManager() { + return streamManager; + } + + /** + * Retrieves the bound address of the FlightService. + * @return The BoundTransportAddress instance. 
+ */ + public BoundTransportAddress getBoundAddress() { + return serverComponents.getBoundAddress(); + } + + @VisibleForTesting + SslContextProvider getSslContextProvider() { + return serverComponents.getSslContextProvider(); + } + + /** + * Stops the FlightService by closing the server components and network resources. + */ + @Override + protected void doStop() { + try { + AutoCloseables.close(serverComponents, streamManager, clientManager, allocator); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * doStop() ensures all resources are cleaned up and resources are recreated on + * doStart() + */ + @Override + protected void doClose() { + doStop(); + } + + private void initializeStreamManager(FlightClientManager clientManager) { + streamManager = null; + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightStreamPlugin.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightStreamPlugin.java new file mode 100644 index 0000000000000..bb7edf491cf02 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightStreamPlugin.java @@ -0,0 +1,264 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.bootstrap; + +import org.opensearch.arrow.flight.api.FlightServerInfoAction; +import org.opensearch.arrow.flight.api.NodesFlightInfoAction; +import org.opensearch.arrow.flight.api.TransportNodesFlightInfoAction; +import org.opensearch.arrow.spi.StreamManager; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.IndexScopedSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.ClusterPlugin; +import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.StreamManagerPlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.rest.RestController; +import org.opensearch.rest.RestHandler; +import org.opensearch.script.ScriptService; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ExecutorBuilder; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.Transport; +import org.opensearch.transport.client.Client; +import org.opensearch.watcher.ResourceWatcherService; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import 
java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +/** + * FlightStreamPlugin class extends BaseFlightStreamPlugin and provides implementation for FlightStream plugin. + */ +public class FlightStreamPlugin extends Plugin implements StreamManagerPlugin, NetworkPlugin, ActionPlugin, ClusterPlugin { + + private final FlightService flightService; + private final boolean isArrowStreamsEnabled; + + /** + * Constructor for FlightStreamPluginImpl. + * @param settings The settings for the FlightStreamPlugin. + */ + public FlightStreamPlugin(Settings settings) { + this.isArrowStreamsEnabled = FeatureFlags.isEnabled(FeatureFlags.ARROW_STREAMS); + this.flightService = isArrowStreamsEnabled ? new FlightService(settings) : null; + } + + /** + * Creates components for the FlightStream plugin. + * @param client The client instance. + * @param clusterService The cluster service instance. + * @param threadPool The thread pool instance. + * @param resourceWatcherService The resource watcher service instance. + * @param scriptService The script service instance. + * @param xContentRegistry The named XContent registry. + * @param environment The environment instance. + * @param nodeEnvironment The node environment instance. + * @param namedWriteableRegistry The named writeable registry. + * @param indexNameExpressionResolver The index name expression resolver instance. + * @param repositoriesServiceSupplier The supplier for the repositories service. + * @return FlightService + */ + @Override + public Collection createComponents( + Client client, + ClusterService clusterService, + ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, + ScriptService scriptService, + NamedXContentRegistry xContentRegistry, + Environment environment, + NodeEnvironment nodeEnvironment, + NamedWriteableRegistry namedWriteableRegistry, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier repositoriesServiceSupplier + ) { + if (!isArrowStreamsEnabled) { + return Collections.emptyList(); + } + flightService.setClusterService(clusterService); + flightService.setThreadPool(threadPool); + flightService.setClient(client); + return List.of(flightService); + } + + /** + * Gets the secure transports for the FlightStream plugin. + * @param settings The settings for the plugin. + * @param threadPool The thread pool instance. + * @param pageCacheRecycler The page cache recycler instance. + * @param circuitBreakerService The circuit breaker service instance. + * @param namedWriteableRegistry The named writeable registry. + * @param networkService The network service instance. + * @param secureTransportSettingsProvider The secure transport settings provider. + * @param tracer The tracer instance. + * @return A map of secure transports. + */ + @Override + public Map> getSecureTransports( + Settings settings, + ThreadPool threadPool, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService, + SecureTransportSettingsProvider secureTransportSettingsProvider, + Tracer tracer + ) { + if (!isArrowStreamsEnabled) { + return Collections.emptyMap(); + } + flightService.setSecureTransportSettingsProvider(secureTransportSettingsProvider); + return Collections.emptyMap(); + } + + /** + * Gets the auxiliary transports for the FlightStream plugin. + * @param settings The settings for the plugin. + * @param threadPool The thread pool instance. 
+ * @param circuitBreakerService The circuit breaker service instance. + * @param networkService The network service instance. + * @param clusterSettings The cluster settings instance. + * @param tracer The tracer instance. + * @return A map of auxiliary transports. + */ + @Override + public Map> getAuxTransports( + Settings settings, + ThreadPool threadPool, + CircuitBreakerService circuitBreakerService, + NetworkService networkService, + ClusterSettings clusterSettings, + Tracer tracer + ) { + if (!isArrowStreamsEnabled) { + return Collections.emptyMap(); + } + flightService.setNetworkService(networkService); + return Collections.singletonMap(FlightService.AUX_TRANSPORT_TYPES_KEY, () -> flightService); + } + + /** + * Gets the REST handlers for the FlightStream plugin. + * @param settings The settings for the plugin. + * @param restController The REST controller instance. + * @param clusterSettings The cluster settings instance. + * @param indexScopedSettings The index scoped settings instance. + * @param settingsFilter The settings filter instance. + * @param indexNameExpressionResolver The index name expression resolver instance. + * @param nodesInCluster The supplier for the discovery nodes. + * @return A list of REST handlers. + */ + @Override + public List getRestHandlers( + Settings settings, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster + ) { + if (!isArrowStreamsEnabled) { + return Collections.emptyList(); + } + return List.of(new FlightServerInfoAction()); + } + + /** + * Gets the list of action handlers for the FlightStream plugin. + * @return A list of action handlers. + */ + @Override + public List> getActions() { + if (!isArrowStreamsEnabled) { + return Collections.emptyList(); + } + return List.of(new ActionHandler<>(NodesFlightInfoAction.INSTANCE, TransportNodesFlightInfoAction.class)); + } + + /** + * Called when node is started. DiscoveryNode argument is passed to allow referring localNode value inside plugin + * + * @param localNode local Node info + */ + @Override + public void onNodeStarted(DiscoveryNode localNode) { + if (!isArrowStreamsEnabled) { + return; + } + flightService.getFlightClientManager().buildClientAsync(localNode.getId()); + } + + /** + * Gets the StreamManager instance for managing flight streams. + */ + @Override + public Supplier getStreamManager() { + if (!isArrowStreamsEnabled) { + return null; + } + return flightService::getStreamManager; + } + + /** + * Gets the list of ExecutorBuilder instances for building thread pools used for FlightServer. + * @param settings The settings for the plugin + */ + @Override + public List> getExecutorBuilders(Settings settings) { + if (!isArrowStreamsEnabled) { + return Collections.emptyList(); + } + return List.of(ServerConfig.getServerExecutorBuilder(), ServerConfig.getClientExecutorBuilder()); + } + + /** + * Gets the list of settings for the Flight plugin. 
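Since every hook above no-ops unless the feature flag is set, here is a sketch of enabling the plugin; the flag is referenced through FeatureFlags.ARROW_STREAMS rather than its literal key, and the test-style bootstrap is assumed (production nodes would set the flag in opensearch.yml). Once a node is up, the REST route registered above can be exercised with GET /_flight/info.

    Settings settings = Settings.builder()
        .put(FeatureFlags.ARROW_STREAMS, true) // experimental feature flag gate
        .build();
    FlightStreamPlugin plugin = new FlightStreamPlugin(settings); // FlightService is only created when the flag is on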
+ */ + @Override + public List> getSettings() { + if (!isArrowStreamsEnabled) { + return Collections.emptyList(); + } + return new ArrayList<>( + Arrays.asList( + ServerComponents.SETTING_FLIGHT_PORTS, + ServerComponents.SETTING_FLIGHT_HOST, + ServerComponents.SETTING_FLIGHT_BIND_HOST, + ServerComponents.SETTING_FLIGHT_PUBLISH_HOST + ) + ) { + { + addAll(ServerConfig.getSettings()); + } + }; + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerComponents.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerComponents.java new file mode 100644 index 0000000000000..06b8b6bd4d35c --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerComponents.java @@ -0,0 +1,286 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.bootstrap; + +import org.apache.arrow.flight.FlightProducer; +import org.apache.arrow.flight.FlightServer; +import org.apache.arrow.flight.Location; +import org.apache.arrow.flight.OSFlightServer; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.util.AutoCloseables; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Nullable; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.PortsRange; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.BindTransportException; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; + +import io.netty.channel.EventLoopGroup; +import io.netty.util.NettyRuntime; +import io.netty.util.concurrent.Future; + +import static java.util.Collections.emptyList; +import static org.opensearch.common.settings.Setting.intSetting; +import static org.opensearch.common.settings.Setting.listSetting; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_PORT; +import static org.opensearch.transport.Transport.resolveTransportPublishPort; + +@SuppressWarnings("removal") +final class ServerComponents implements AutoCloseable { + + public static final Setting> SETTING_FLIGHT_HOST = listSetting( + "arrow.flight.host", + emptyList(), + Function.identity(), + Setting.Property.NodeScope + ); + + public static final Setting> SETTING_FLIGHT_BIND_HOST = listSetting( + "arrow.flight.bind_host", + SETTING_FLIGHT_HOST, + Function.identity(), + Setting.Property.NodeScope + ); + + public static final Setting> SETTING_FLIGHT_PUBLISH_HOST = listSetting( + "arrow.flight.publish_host", + SETTING_FLIGHT_HOST, + Function.identity(), + 
Setting.Property.NodeScope + ); + + public static final Setting SETTING_FLIGHT_PUBLISH_PORT = intSetting( + "arrow.flight.publish_port", + -1, + -1, + Setting.Property.NodeScope + ); + + private static final Logger logger = LogManager.getLogger(ServerComponents.class); + + private static final String GRPC_WORKER_ELG = "os-grpc-worker-ELG"; + private static final String GRPC_BOSS_ELG = "os-grpc-boss-ELG"; + private static final int SHUTDOWN_TIMEOUT_SECONDS = 5; + + public static final String FLIGHT_TRANSPORT_SETTING_KEY = "transport-flight"; + public static final Setting SETTING_FLIGHT_PORTS = AUX_TRANSPORT_PORT.getConcreteSettingForNamespace( + FLIGHT_TRANSPORT_SETTING_KEY + ); + + private final Settings settings; + private final PortsRange port; + private final String[] bindHosts; + private final String[] publishHosts; + private volatile BoundTransportAddress boundAddress; + + private FlightServer server; + private BufferAllocator allocator; + ClusterService clusterService; + private NetworkService networkService; + private ThreadPool threadPool; + private SslContextProvider sslContextProvider; + private FlightProducer flightProducer; + + private EventLoopGroup bossEventLoopGroup; + EventLoopGroup workerEventLoopGroup; + private ExecutorService serverExecutor; + + ServerComponents(Settings settings) { + this.settings = settings; + this.port = SETTING_FLIGHT_PORTS.get(settings); + + List bindHosts = SETTING_FLIGHT_BIND_HOST.get(settings); + this.bindHosts = bindHosts.toArray(new String[0]); + + List publishHosts = SETTING_FLIGHT_PUBLISH_HOST.get(settings); + this.publishHosts = publishHosts.toArray(new String[0]); + } + + void setAllocator(BufferAllocator allocator) { + this.allocator = allocator; + } + + void setClusterService(ClusterService clusterService) { + this.clusterService = Objects.requireNonNull(clusterService); + } + + void setNetworkService(NetworkService networkService) { + this.networkService = Objects.requireNonNull(networkService); + } + + void setThreadPool(ThreadPool threadPool) { + this.threadPool = Objects.requireNonNull(threadPool); + } + + void setSslContextProvider(@Nullable SslContextProvider sslContextProvider) { + this.sslContextProvider = sslContextProvider; + } + + void setFlightProducer(FlightProducer flightProducer) { + this.flightProducer = Objects.requireNonNull(flightProducer); + } + + private FlightServer buildAndStartServer(Location location, FlightProducer producer) throws IOException { + FlightServer server = OSFlightServer.builder() + .allocator(allocator) + .location(location) + .producer(producer) + .sslContext(sslContextProvider != null ? 
sslContextProvider.getServerSslContext() : null) + .channelType(ServerConfig.serverChannelType()) + .bossEventLoopGroup(bossEventLoopGroup) + .workerEventLoopGroup(workerEventLoopGroup) + .executor(serverExecutor) + .build(); + AccessController.doPrivileged((PrivilegedAction) () -> { + try { + server.start(); + } catch (IOException e) { + throw new RuntimeException(e); + } + return null; + }); + return server; + } + + SslContextProvider getSslContextProvider() { + return sslContextProvider; + } + + BoundTransportAddress getBoundAddress() { + return boundAddress; + } + + void start() { + InetAddress[] hostAddresses; + try { + hostAddresses = networkService.resolveBindHostAddresses(bindHosts); + } catch (IOException e) { + throw new BindTransportException("Failed to resolve host [" + Arrays.toString(bindHosts) + "]", e); + } + + List boundAddresses = new ArrayList<>(hostAddresses.length); + for (InetAddress address : hostAddresses) { + AccessController.doPrivileged((PrivilegedAction) () -> { + boundAddresses.add(bindAddress(address, port)); + return null; + }); + } + + final InetAddress publishInetAddress; + try { + publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts); + } catch (Exception e) { + throw new BindTransportException("Failed to resolve publish address", e); + } + + final int publishPort = resolveTransportPublishPort(SETTING_FLIGHT_PUBLISH_PORT.get(settings), boundAddresses, publishInetAddress); + + if (publishPort < 0) { + throw new BindTransportException( + "Failed to auto-resolve flight publish port, multiple bound addresses " + + boundAddresses + + " with distinct ports and none of them matched the publish address (" + + publishInetAddress + + "). Please specify a unique port by setting " + + SETTING_FLIGHT_PUBLISH_PORT.getKey() + ); + } + + TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); + this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress); + } + + void initComponents() throws Exception { + bossEventLoopGroup = ServerConfig.createELG(GRPC_BOSS_ELG, 1); + workerEventLoopGroup = ServerConfig.createELG(GRPC_WORKER_ELG, NettyRuntime.availableProcessors() * 2); + serverExecutor = threadPool.executor(ServerConfig.FLIGHT_SERVER_THREAD_POOL_NAME); + } + + @Override + public void close() { + try { + AutoCloseables.close(server); + gracefullyShutdownELG(bossEventLoopGroup, GRPC_BOSS_ELG); + gracefullyShutdownELG(workerEventLoopGroup, GRPC_WORKER_ELG); + if (serverExecutor != null) { + serverExecutor.shutdown(); + } + } catch (Exception e) { + logger.error("Error while closing server components", e); + } + } + + private TransportAddress bindAddress(final InetAddress hostAddress, final PortsRange portsRange) { + final AtomicReference lastException = new AtomicReference<>(); + final AtomicReference boundSocket = new AtomicReference<>(); + final TransportAddress[] address = new TransportAddress[1]; + boolean success = portsRange.iterate(portNumber -> { + boundSocket.set(new InetSocketAddress(hostAddress, portNumber)); + address[0] = new TransportAddress(boundSocket.get()); + try { + return startFlightServer(address[0]); + } catch (Exception e) { + lastException.set(e); + return false; + } + }); + + if (!success) { + throw new BindTransportException("Failed to bind to [" + hostAddress + "]", lastException.get()); + } + return address[0]; + } + + private boolean startFlightServer(TransportAddress transportAddress) { + InetSocketAddress address = 
transportAddress.address(); + Location serverLocation = sslContextProvider != null + ? Location.forGrpcTls(address.getHostString(), address.getPort()) + : Location.forGrpcInsecure(address.getHostString(), address.getPort()); + try { + this.server = buildAndStartServer(serverLocation, flightProducer); + logger.info("Arrow Flight server started. Listening at {}", serverLocation); + return true; + } catch (Exception e) { + String errorMsg = "Failed to start Arrow Flight server at " + serverLocation; + logger.debug(errorMsg, e); + return false; + } + } + + private void gracefullyShutdownELG(EventLoopGroup group, String groupName) { + if (group != null) { + Future shutdownFuture = group.shutdownGracefully(0, SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS); + shutdownFuture.awaitUninterruptibly(); + if (!shutdownFuture.isSuccess()) { + logger.warn("Error closing {} netty event loop group {}", groupName, shutdownFuture.cause()); + } + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerConfig.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerConfig.java new file mode 100644 index 0000000000000..78b8b1dd56a6a --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerConfig.java @@ -0,0 +1,218 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.bootstrap; + +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.threadpool.ScalingExecutorBuilder; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import io.netty.channel.Channel; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.epoll.Epoll; +import io.netty.channel.epoll.EpollEventLoopGroup; +import io.netty.channel.epoll.EpollServerSocketChannel; +import io.netty.channel.epoll.EpollSocketChannel; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; + +/** + * Configuration class for OpenSearch Flight server settings. + * This class manages server-side configurations including port settings, Arrow memory settings, + * thread pool configurations, and SSL/TLS settings. + */ +public class ServerConfig { + /** + * Creates a new instance of the server configuration with default settings. 
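The binding logic above resolves hosts through NetworkService and walks the configured PortsRange until a bind succeeds, so a node can be pinned to explicit addresses with settings like the following sketch. Values are illustrative, and the ports-range key is resolved via SETTING_FLIGHT_PORTS (a package-level constant) rather than spelled out here.

    Settings settings = Settings.builder()
        .put(ServerComponents.SETTING_FLIGHT_PORTS.getKey(), "47470-47480") // first port that binds wins
        .putList("arrow.flight.bind_host", "0.0.0.0")
        .putList("arrow.flight.publish_host", "10.0.0.5")
        .put("arrow.flight.publish_port", 47470) // needed when bound addresses use distinct ports
        .build();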
+ */ + public ServerConfig() {} + + static final Setting ARROW_ALLOCATION_MANAGER_TYPE = Setting.simpleString( + "arrow.allocation.manager.type", + "Netty", + Setting.Property.NodeScope + ); + + static final Setting ARROW_ENABLE_NULL_CHECK_FOR_GET = Setting.boolSetting( + "arrow.enable_null_check_for_get", + false, + Setting.Property.NodeScope + ); + + static final Setting ARROW_ENABLE_DEBUG_ALLOCATOR = Setting.boolSetting( + "arrow.memory.debug.allocator", + false, + Setting.Property.NodeScope + ); + + static final Setting ARROW_ENABLE_UNSAFE_MEMORY_ACCESS = Setting.boolSetting( + "arrow.enable_unsafe_memory_access", + true, + Setting.Property.NodeScope + ); + + static final Setting FLIGHT_THREAD_POOL_MIN_SIZE = Setting.intSetting( + "thread_pool.flight-server.min", + 0, + 0, + Setting.Property.NodeScope + ); + + static final Setting FLIGHT_THREAD_POOL_MAX_SIZE = Setting.intSetting( + "thread_pool.flight-server.max", + 100000, // TODO depends on max concurrent streams per node, decide after benchmark. To be controlled by admission control layer. + 1, + Setting.Property.NodeScope + ); + + static final Setting FLIGHT_THREAD_POOL_KEEP_ALIVE = Setting.timeSetting( + "thread_pool.flight-server.keep_alive", + TimeValue.timeValueSeconds(30), + Setting.Property.NodeScope + ); + + static final Setting ARROW_SSL_ENABLE = Setting.boolSetting( + "arrow.ssl.enable", + false, // TODO: get default from security enabled + Setting.Property.NodeScope + ); + + /** + * The thread pool name for the Flight server. + */ + public static final String FLIGHT_SERVER_THREAD_POOL_NAME = "flight-server"; + + /** + * The thread pool name for the Flight client. + */ + public static final String FLIGHT_CLIENT_THREAD_POOL_NAME = "flight-client"; + + private static final String host = "localhost"; + private static boolean enableSsl; + private static int threadPoolMin; + private static int threadPoolMax; + private static TimeValue keepAlive; + + /** + * Initializes the server configuration with the provided settings. + * Sets system properties for Arrow memory management and configures thread pool settings. + * + * @param settings The OpenSearch settings to initialize the server with + */ + @SuppressForbidden(reason = "required for arrow allocator") + @SuppressWarnings("removal") + public static void init(Settings settings) { + AccessController.doPrivileged((PrivilegedAction) () -> { + System.setProperty("arrow.allocation.manager.type", ARROW_ALLOCATION_MANAGER_TYPE.get(settings)); + System.setProperty("arrow.enable_null_check_for_get", Boolean.toString(ARROW_ENABLE_NULL_CHECK_FOR_GET.get(settings))); + System.setProperty("arrow.enable_unsafe_memory_access", Boolean.toString(ARROW_ENABLE_UNSAFE_MEMORY_ACCESS.get(settings))); + System.setProperty("arrow.memory.debug.allocator", Boolean.toString(ARROW_ENABLE_DEBUG_ALLOCATOR.get(settings))); + Netty4Configs.init(settings); + return null; + }); + enableSsl = ARROW_SSL_ENABLE.get(settings); + threadPoolMin = FLIGHT_THREAD_POOL_MIN_SIZE.get(settings); + threadPoolMax = FLIGHT_THREAD_POOL_MAX_SIZE.get(settings); + keepAlive = FLIGHT_THREAD_POOL_KEEP_ALIVE.get(settings); + } + + /** + * Checks if SSL/TLS is enabled for the Flight server. + * + * @return true if SSL is enabled, false otherwise + */ + public static boolean isSslEnabled() { + return enableSsl; + } + + /** + * Gets the thread pool executor builder configured for the Flight server. 
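ServerConfig.init translates the arrow.* settings above into system properties before any allocator is created and captures the thread pool sizing; a sketch with illustrative values:

    Settings settings = Settings.builder()
        .put("arrow.ssl.enable", true)                       // serve Flight over TLS
        .put("thread_pool.flight-server.min", 0)
        .put("thread_pool.flight-server.max", 4)             // default is intentionally high, pending benchmarks
        .put("thread_pool.flight-server.keep_alive", "30s")
        .build();
    ServerConfig.init(settings); // sets arrow.* system properties and validates the io.netty.* ones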
+ * + * @return The configured ScalingExecutorBuilder instance + */ + public static ScalingExecutorBuilder getServerExecutorBuilder() { + return new ScalingExecutorBuilder(FLIGHT_SERVER_THREAD_POOL_NAME, threadPoolMin, threadPoolMax, keepAlive); + } + + /** + * Gets the thread pool executor builder configured for the Flight server. + * + * @return The configured ScalingExecutorBuilder instance + */ + public static ScalingExecutorBuilder getClientExecutorBuilder() { + return new ScalingExecutorBuilder(FLIGHT_CLIENT_THREAD_POOL_NAME, threadPoolMin, threadPoolMax, keepAlive); + } + + /** + * Returns a list of all settings managed by this configuration class. + * + * @return List of Setting instances + */ + public static List> getSettings() { + return new ArrayList<>( + Arrays.asList( + ARROW_ALLOCATION_MANAGER_TYPE, + ARROW_ENABLE_NULL_CHECK_FOR_GET, + ARROW_ENABLE_DEBUG_ALLOCATOR, + ARROW_ENABLE_UNSAFE_MEMORY_ACCESS, + ARROW_SSL_ENABLE + ) + ); + } + + static EventLoopGroup createELG(String name, int eventLoopThreads) { + + return Epoll.isAvailable() + ? new EpollEventLoopGroup(eventLoopThreads, OpenSearchExecutors.daemonThreadFactory(name)) + : new NioEventLoopGroup(eventLoopThreads, OpenSearchExecutors.daemonThreadFactory(name)); + } + + static Class serverChannelType() { + return Epoll.isAvailable() ? EpollServerSocketChannel.class : NioServerSocketChannel.class; + } + + static Class clientChannelType() { + return Epoll.isAvailable() ? EpollSocketChannel.class : NioSocketChannel.class; + } + + private static class Netty4Configs { + + @SuppressForbidden(reason = "required for netty allocator configuration") + public static void init(Settings settings) { + checkSystemProperty("io.netty.allocator.numDirectArenas", "1"); + checkSystemProperty("io.netty.noUnsafe", "false"); + checkSystemProperty("io.netty.tryUnsafe", "true"); + checkSystemProperty("io.netty.tryReflectionSetAccessible", "true"); + } + + private static void checkSystemProperty(String propertyName, String expectedValue) { + String actualValue = System.getProperty(propertyName); + if (!expectedValue.equals(actualValue)) { + throw new IllegalStateException( + "Required system property [" + + propertyName + + "] is incorrect; expected: [" + + expectedValue + + "] actual: [" + + actualValue + + "]." + ); + } + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/package-info.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/package-info.java new file mode 100644 index 0000000000000..3ee247809b0c0 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Bootstrap classes for initializing and configuring OpenSearch Flight service. 
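Note that Netty4Configs.init above only verifies the io.netty.* properties and fails fast if they differ; it does not set them. They therefore have to be supplied on the JVM command line (for example via jvm.options). The checked values are:

    -Dio.netty.allocator.numDirectArenas=1
    -Dio.netty.noUnsafe=false
    -Dio.netty.tryUnsafe=true
    -Dio.netty.tryReflectionSetAccessible=true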
+ */ +package org.opensearch.arrow.flight.bootstrap; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/DefaultSslContextProvider.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/DefaultSslContextProvider.java new file mode 100644 index 0000000000000..187124911fc5f --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/DefaultSslContextProvider.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.bootstrap.tls; + +import org.opensearch.plugins.SecureTransportSettingsProvider; + +import javax.net.ssl.SSLException; + +import java.util.Locale; + +import io.netty.handler.ssl.ApplicationProtocolConfig; +import io.netty.handler.ssl.ApplicationProtocolNames; +import io.netty.handler.ssl.ClientAuth; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslProvider; +import io.netty.handler.ssl.SupportedCipherSuiteFilter; + +/** + * DefaultSslContextProvider is an implementation of the SslContextProvider interface that provides SSL contexts based on the provided SecureTransportSettingsProvider. + */ +public class DefaultSslContextProvider implements SslContextProvider { + + private final SecureTransportSettingsProvider secureTransportSettingsProvider; + + /** + * Constructor for DefaultSslContextProvider. + * @param secureTransportSettingsProvider The SecureTransportSettingsProvider instance. + */ + public DefaultSslContextProvider(SecureTransportSettingsProvider secureTransportSettingsProvider) { + this.secureTransportSettingsProvider = secureTransportSettingsProvider; + } + + // TODO - handle certificates reload + /** + * Creates and returns the server SSL context based on the provided SecureTransportSettingsProvider. + * @return The server SSL context. + */ + @Override + public SslContext getServerSslContext() { + try { + SecureTransportSettingsProvider.SecureTransportParameters parameters = secureTransportSettingsProvider.parameters(null).get(); + return SslContextBuilder.forServer(parameters.keyManagerFactory().get()) + .sslProvider(SslProvider.valueOf(parameters.sslProvider().get().toUpperCase(Locale.ROOT))) + .clientAuth(ClientAuth.valueOf(parameters.clientAuth().get().toUpperCase(Locale.ROOT))) + .protocols(parameters.protocols()) + .ciphers(parameters.cipherSuites(), SupportedCipherSuiteFilter.INSTANCE) + .sessionCacheSize(0) + .sessionTimeout(0) + .applicationProtocolConfig( + new ApplicationProtocolConfig( + ApplicationProtocolConfig.Protocol.ALPN, + // NO_ADVERTISE is currently the only mode supported by both OpenSsl and JDK providers. + ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, + // ACCEPT is currently the only mode supported by both OpenSsl and JDK providers. + ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, + ApplicationProtocolNames.HTTP_2, + ApplicationProtocolNames.HTTP_1_1 + ) + ) + .trustManager(parameters.trustManagerFactory().get()) + .build(); + } catch (SSLException e) { + throw new RuntimeException(e); + } + } + + /** + * Returns the client SSL context based on the provided SecureTransportSettingsProvider. + * @return The client SSL context. 
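+ * Unlike the server context this one is built with {@code SslContextBuilder.forClient()}, but it still installs
+ * a key manager, so outbound connections can present a certificate when the server requests mutual TLS.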
+ */ + @Override + public SslContext getClientSslContext() { + try { + SecureTransportSettingsProvider.SecureTransportParameters parameters = secureTransportSettingsProvider.parameters(null).get(); + return SslContextBuilder.forClient() + .sslProvider(SslProvider.valueOf(parameters.sslProvider().get().toUpperCase(Locale.ROOT))) + .protocols(parameters.protocols()) + .ciphers(parameters.cipherSuites(), SupportedCipherSuiteFilter.INSTANCE) + .applicationProtocolConfig( + new ApplicationProtocolConfig( + ApplicationProtocolConfig.Protocol.ALPN, + ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, + ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, + ApplicationProtocolNames.HTTP_2, + ApplicationProtocolNames.HTTP_1_1 + ) + ) + .sessionCacheSize(0) + .sessionTimeout(0) + .keyManager(parameters.keyManagerFactory().get()) + .trustManager(parameters.trustManagerFactory().get()) + .build(); + } catch (SSLException e) { + throw new RuntimeException(e); + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/SslContextProvider.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/SslContextProvider.java new file mode 100644 index 0000000000000..2cd38bc3c1dd5 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/SslContextProvider.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.bootstrap.tls; + +import io.netty.handler.ssl.SslContext; + +/** + * Provider interface for SSL/TLS context configuration in OpenSearch Flight. + * This interface defines methods for managing SSL contexts for both server and client-side + * Flight communications. + */ +public interface SslContextProvider { + + /** + * Gets the SSL context configuration for the Flight server. + * This context is used to secure incoming connections to the Flight server. + * + * @return SslContext configured for server-side TLS + */ + SslContext getServerSslContext(); + + /** + * Gets the SSL context configuration for Flight clients. + * This context is used when making outbound connections to other Flight servers. + * + * @return SslContext configured for client-side TLS + */ + SslContext getClientSslContext(); +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/package-info.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/package-info.java new file mode 100644 index 0000000000000..2ad8ae734c2da --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * TLS/SSL configuration and security components for OpenSearch Flight service. 
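+ * A minimal sketch of how the two contexts are obtained (illustrative; {@code settingsProvider} stands in for
+ * whatever {@code SecureTransportSettingsProvider} the node's security plugin supplies):
+ * <pre>{@code
+ * SslContextProvider provider = new DefaultSslContextProvider(settingsProvider);
+ * SslContext serverCtx = provider.getServerSslContext(); // secures inbound Flight connections
+ * SslContext clientCtx = provider.getClientSslContext(); // secures outbound node-to-node streams
+ * }</pre>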
+ */
+package org.opensearch.arrow.flight.bootstrap.tls;
diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/package-info.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/package-info.java
new file mode 100644
index 0000000000000..2341a24d0be85
--- /dev/null
+++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Root package for OpenSearch Flight functionality, providing core Flight service integration with OpenSearch.
+ */
+package org.opensearch.arrow.flight;
diff --git a/plugins/arrow-flight-rpc/src/main/plugin-metadata/plugin-security.policy b/plugins/arrow-flight-rpc/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 0000000000000..803350a578009
--- /dev/null
+++ b/plugins/arrow-flight-rpc/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+grant codeBase "${codebase.netty-common}" {
+  permission java.net.SocketPermission "*", "accept,connect,listen,resolve";
+  permission java.lang.RuntimePermission "*", "setContextClassLoader";
+};
+
+grant codeBase "${codebase.grpc-core}" {
+  permission java.net.SocketPermission "*", "accept,connect,listen,resolve";
+  permission java.lang.RuntimePermission "*", "setContextClassLoader";
+};
+
+grant {
+  // arrow flight service permissions
+  permission java.util.PropertyPermission "arrow.allocation.manager.type", "write";
+  permission java.util.PropertyPermission "arrow.enable_null_check_for_get", "write";
+  permission java.util.PropertyPermission "arrow.enable_unsafe_memory_access", "write";
+  permission java.util.PropertyPermission "arrow.memory.debug.allocator", "write";
+
+  permission java.util.PropertyPermission "io.netty.tryReflectionSetAccessible", "write";
+  permission java.util.PropertyPermission "io.netty.allocator.numDirectArenas", "write";
+  permission java.util.PropertyPermission "io.netty.noUnsafe", "write";
+  permission java.util.PropertyPermission "io.netty.tryUnsafe", "write";
+
+  // Needed for netty based arrow flight server for netty configs related to buffer allocator
+  permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
+
+  permission java.lang.RuntimePermission "modifyThreadGroup";
+  permission java.lang.RuntimePermission "modifyThread";
+  permission java.net.SocketPermission "*", "accept,connect,listen,resolve";
+
+  // Reflection access needed by Arrow
+  permission java.lang.RuntimePermission "accessDeclaredMembers";
+
+  // Memory access
+  permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
+};
diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java
new file mode 100644
index 0000000000000..6f93d792f9db4
--- /dev/null
+++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java
@@ -0,0 +1,104 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.arrow.flight;
+
+import org.opensearch.arrow.flight.api.FlightServerInfoAction;
+import org.opensearch.arrow.flight.api.NodesFlightInfoAction;
+import org.opensearch.arrow.flight.bootstrap.FlightService;
+import org.opensearch.arrow.flight.bootstrap.FlightStreamPlugin;
+import org.opensearch.arrow.spi.StreamManager;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.plugins.SecureTransportSettingsProvider;
+import org.opensearch.test.FeatureFlagSetter;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.threadpool.ExecutorBuilder;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS_SETTING;
+import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_KEY;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class FlightStreamPluginTests extends OpenSearchTestCase {
+    private Settings settings;
+    private ClusterService clusterService;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        settings = Settings.builder().put(ARROW_STREAMS_SETTING.getKey(), true).build();
+        clusterService = mock(ClusterService.class);
+        ClusterState clusterState = mock(ClusterState.class);
+        DiscoveryNodes nodes = mock(DiscoveryNodes.class);
+        when(clusterService.state()).thenReturn(clusterState);
+        when(clusterState.nodes()).thenReturn(nodes);
+        when(nodes.getLocalNodeId()).thenReturn("test-node");
+    }
+
+    public void testPluginEnabled() throws IOException {
+        FeatureFlags.initializeFeatureFlags(settings);
+        FeatureFlagSetter.set(ARROW_STREAMS_SETTING.getKey());
+        FlightStreamPlugin plugin = new FlightStreamPlugin(settings);
+        Collection<Object> components = plugin.createComponents(
+            null,
+            clusterService,
+            mock(ThreadPool.class),
+            null,
+            null,
+            null,
+            null,
+            null,
+            null,
+            null,
+            null
+        );
+
+        assertNotNull(components);
+        assertFalse(components.isEmpty());
+        assertEquals(1, components.size());
+        assertTrue(components.iterator().next() instanceof FlightService);
+
+        List<ExecutorBuilder<?>> executorBuilders = plugin.getExecutorBuilders(settings);
+        assertNotNull(executorBuilders);
+        assertFalse(executorBuilders.isEmpty());
+        assertEquals(2, executorBuilders.size());
+
+        Supplier<StreamManager> streamManager = plugin.getStreamManager();
+        assertNotNull(streamManager);
+
+        List<Setting<?>> settings = plugin.getSettings();
+        assertNotNull(settings);
+        assertFalse(settings.isEmpty());
+
+        assertNotNull(plugin.getSecureTransports(null, null, null, null, null, null, mock(SecureTransportSettingsProvider.class), null));
+
+        assertTrue(
+            plugin.getAuxTransports(null, null, null, new NetworkService(List.of()), null, null)
+                .get(AUX_TRANSPORT_TYPES_KEY)
+                .get() instanceof FlightService
+        );
+        assertEquals(1, plugin.getRestHandlers(null, null, null, null, null, null, null).size());
+
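// the single REST handler should be the Flight server info endpoint, backed by the nodes flight info action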
assertTrue(plugin.getRestHandlers(null, null, null, null, null, null, null).get(0) instanceof FlightServerInfoAction); + assertEquals(1, plugin.getActions().size()); + assertEquals(NodesFlightInfoAction.INSTANCE.name(), plugin.getActions().get(0).getAction().name()); + + plugin.close(); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/FlightServerInfoActionTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/FlightServerInfoActionTests.java new file mode 100644 index 0000000000000..6cb75d4a93dbe --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/FlightServerInfoActionTests.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.SetOnce; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.test.rest.RestActionTestCase; +import org.junit.Before; + +import java.util.Collections; + +import static org.mockito.Mockito.mock; + +public class FlightServerInfoActionTests extends RestActionTestCase { + private FlightServerInfoAction handler; + + @Before + public void setUpAction() { + handler = new FlightServerInfoAction(); + controller().registerHandler(handler); + } + + public void testGetName() { + assertEquals("flight_server_info_action", handler.getName()); + } + + public void testRoutes() { + var routes = handler.routes(); + assertEquals(2, routes.size()); + assertTrue( + routes.stream().anyMatch(route -> route.getPath().equals("/_flight/info") && route.getMethod() == RestRequest.Method.GET) + ); + assertTrue( + routes.stream() + .anyMatch(route -> route.getPath().equals("/_flight/info/{nodeId}") && route.getMethod() == RestRequest.Method.GET) + ); + } + + public void testFlightInfoRequest() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) + .withPath("/_flight/info") + .build(); + SetOnce executeCalled = new SetOnce<>(); + verifyingClient.setExecuteVerifier((action, actionRequest) -> { + assertEquals(NodesFlightInfoAction.INSTANCE.name(), action.name()); + assertNotNull(actionRequest); + executeCalled.set(true); + return new NodesFlightInfoResponse( + new ClusterName("test-cluster"), + Collections.singletonList(new NodeFlightInfo(mock(DiscoveryNode.class), mock(BoundTransportAddress.class))), + Collections.emptyList() + ); + }); + dispatchRequest(request); + assertEquals(Boolean.TRUE, executeCalled.get()); + } + + public void testFlightInfoRequestWithNodeId() throws Exception { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) + .withPath("/_flight/info/local_node") + .build(); + SetOnce executeCalled = new SetOnce<>(); + verifyingClient.setExecuteVerifier((action, actionRequest) -> { + assertEquals(NodesFlightInfoAction.INSTANCE.name(), action.name()); + assertNotNull(actionRequest); + executeCalled.set(true); + return null; + }); + dispatchRequest(request); + assertEquals(Boolean.TRUE, executeCalled.get()); + } + + public void testFlightInfoRequestWithInvalidPath() throws Exception { + RestRequest 
request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) + .withPath("/_flight/invalid_path") + .build(); + SetOnce executeCalled = new SetOnce<>(); + verifyingClient.setExecuteVerifier((action, actionRequest) -> { + assertEquals(NodesFlightInfoAction.INSTANCE.name(), action.name()); + assertNotNull(actionRequest); + executeCalled.set(true); + return new NodesFlightInfoResponse( + new ClusterName("test-cluster"), + Collections.singletonList(new NodeFlightInfo(mock(DiscoveryNode.class), mock(BoundTransportAddress.class))), + Collections.emptyList() + ); + }); + dispatchRequest(request); + assertNull(executeCalled.get()); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodeFlightInfoTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodeFlightInfoTests.java new file mode 100644 index 0000000000000..2f8d7deb06f3f --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodeFlightInfoTests.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.OpenSearchTestCase; + +import java.net.InetAddress; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; + +@SuppressWarnings("unchecked") +public class NodeFlightInfoTests extends OpenSearchTestCase { + + public void testNodeFlightInfoSerialization() throws Exception { + DiscoveryNode node = new DiscoveryNode( + "test_node", + "test_node", + "hostname", + "localhost", + "127.0.0.1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + new HashMap<>(), + new HashSet<>(), + Version.CURRENT + ); + + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + + NodeFlightInfo originalInfo = new NodeFlightInfo(node, boundAddress); + + BytesStreamOutput output = new BytesStreamOutput(); + originalInfo.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + NodeFlightInfo deserializedInfo = new NodeFlightInfo(input); + + assertEquals(originalInfo.getNode(), deserializedInfo.getNode()); + assertEquals(originalInfo.getBoundAddress().boundAddresses().length, deserializedInfo.getBoundAddress().boundAddresses().length); + assertEquals(originalInfo.getBoundAddress().boundAddresses()[0], deserializedInfo.getBoundAddress().boundAddresses()[0]); + assertEquals(originalInfo.getBoundAddress().publishAddress(), deserializedInfo.getBoundAddress().publishAddress()); + } + + public void testNodeFlightInfoEquality() throws Exception { + 
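// two infos built from the same node and bound address should expose identical address data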
DiscoveryNode node = new DiscoveryNode( + "test_node", + "test_node", + "hostname", + "localhost", + "127.0.0.1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + new HashMap<>(), + new HashSet<>(), + Version.CURRENT + ); + + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + + NodeFlightInfo info1 = new NodeFlightInfo(node, boundAddress); + NodeFlightInfo info2 = new NodeFlightInfo(node, boundAddress); + + assertEquals(info1.getBoundAddress(), info2.getBoundAddress()); + } + + public void testGetters() throws Exception { + DiscoveryNode node = new DiscoveryNode( + "test_node", + "test_node", + "hostname", + "localhost", + "127.0.0.1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + new HashMap<>(), + new HashSet<>(), + Version.CURRENT + ); + + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + + NodeFlightInfo info = new NodeFlightInfo(node, boundAddress); + + assertEquals(node, info.getNode()); + assertEquals(boundAddress, info.getBoundAddress()); + } + + public void testToXContent() throws Exception { + TransportAddress boundAddress1 = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + TransportAddress boundAddress2 = new TransportAddress(InetAddress.getLoopbackAddress(), 47471); + TransportAddress publishAddress = new TransportAddress(InetAddress.getLoopbackAddress(), 47472); + + BoundTransportAddress boundAddress = new BoundTransportAddress( + new TransportAddress[] { boundAddress1, boundAddress2 }, + publishAddress + ); + + NodeFlightInfo info = new NodeFlightInfo( + new DiscoveryNode( + "test_node", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + Collections.emptyMap(), + Collections.emptySet(), + Version.CURRENT + ), + boundAddress + ); + + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + builder.field("node_info"); + info.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + Map responseMap = parser.map(); + + Map nodeInfo = (Map) responseMap.get("node_info"); + assertNotNull("node_info object should exist", nodeInfo); + + Map flightServer = (Map) nodeInfo.get("flight_server"); + assertNotNull("flight_server object should exist", flightServer); + + List> boundAddresses = (List>) flightServer.get("bound_addresses"); + assertNotNull("bound_addresses array should exist", boundAddresses); + assertEquals("Should have 2 bound addresses", 2, boundAddresses.size()); + + assertEquals("localhost", boundAddresses.get(0).get("host")); + assertEquals(47470, boundAddresses.get(0).get("port")); + + assertEquals("localhost", boundAddresses.get(1).get("host")); + assertEquals(47471, boundAddresses.get(1).get("port")); + + Map publishAddressMap = (Map) flightServer.get("publish_address"); + assertNotNull("publish_address object should exist", publishAddressMap); + assertEquals("localhost", publishAddressMap.get("host")); + assertEquals(47472, publishAddressMap.get("port")); + } + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequestTests.java 
b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequestTests.java new file mode 100644 index 0000000000000..756177423fe6f --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequestTests.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +public class NodesFlightInfoRequestTests extends OpenSearchTestCase { + + public void testNodesFlightInfoRequestSerialization() throws Exception { + NodesFlightInfoRequest originalRequest = new NodesFlightInfoRequest("node1", "node2"); + + BytesStreamOutput output = new BytesStreamOutput(); + originalRequest.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + NodesFlightInfoRequest deserializedRequest = new NodesFlightInfoRequest(input); + + assertArrayEquals(originalRequest.nodesIds(), deserializedRequest.nodesIds()); + } + + public void testNodesFlightInfoRequestConcreteNodes() { + String[] nodeIds = new String[] { "node1", "node2" }; + NodesFlightInfoRequest request = new NodesFlightInfoRequest(nodeIds); + assertArrayEquals(nodeIds, request.nodesIds()); + } + + public void testNodesFlightInfoRequestAllNodes() { + NodesFlightInfoRequest request = new NodesFlightInfoRequest(); + assertEquals(0, request.nodesIds().length); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponseTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponseTests.java new file mode 100644 index 0000000000000..49a6cc6bacf40 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponseTests.java @@ -0,0 +1,241 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.Version; +import org.opensearch.action.FailedNodeException; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.OpenSearchTestCase; + +import java.net.ConnectException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; + +@SuppressWarnings("unchecked") +public class NodesFlightInfoResponseTests extends OpenSearchTestCase { + + public void testNodesFlightInfoResponseSerialization() throws Exception { + ClusterName clusterName = new ClusterName("test-cluster"); + List nodes = new ArrayList<>(); + + DiscoveryNode node1 = createTestNode("node1"); + DiscoveryNode node2 = createTestNode("node2"); + + nodes.add(createNodeFlightInfo(node1, 47470)); + nodes.add(createNodeFlightInfo(node2, 47471)); + + NodesFlightInfoResponse originalResponse = new NodesFlightInfoResponse(clusterName, nodes, List.of()); + + BytesStreamOutput output = new BytesStreamOutput(); + originalResponse.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + NodesFlightInfoResponse deserializedResponse = new NodesFlightInfoResponse(input); + assertEquals(originalResponse.getNodes().size(), deserializedResponse.getNodes().size()); + + for (int i = 0; i < originalResponse.getNodes().size(); i++) { + NodeFlightInfo originalNode = originalResponse.getNodes().get(i); + NodeFlightInfo deserializedNode = deserializedResponse.getNodes().get(i); + + assertEquals(originalNode.getNode().getId(), deserializedNode.getNode().getId()); + assertEquals(originalNode.getNode().getName(), deserializedNode.getNode().getName()); + assertEquals(originalNode.getBoundAddress().publishAddress(), deserializedNode.getBoundAddress().publishAddress()); + } + assertEquals(originalResponse.getClusterName(), deserializedResponse.getClusterName()); + } + + public void testNodesFlightInfoResponseEmpty() { + ClusterName clusterName = new ClusterName("test-cluster"); + List nodes = new ArrayList<>(); + + NodesFlightInfoResponse response = new NodesFlightInfoResponse(clusterName, nodes, List.of()); + + assertTrue(response.getNodes().isEmpty()); + assertEquals(clusterName, response.getClusterName()); + } + + public void testToXContentWithFailures() throws Exception { + NodesFlightInfoResponse response = getNodesFlightInfoResponse(); + + XContentBuilder builder = JsonXContent.contentBuilder(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + Map responseMap = parser.map(); + + Map nodesStats = (Map) responseMap.get("_nodes"); + assertNotNull("_nodes object should exist", nodesStats); + assertEquals(2, nodesStats.get("total")); + assertEquals(2, nodesStats.get("successful")); + assertEquals(2, nodesStats.get("failed")); 
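// past the _nodes header the body carries the cluster name, one entry per successful node, and a failures array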
+ + assertEquals("test-cluster", responseMap.get("cluster_name")); + + Map nodes = (Map) responseMap.get("nodes"); + assertNotNull("nodes object should exist", nodes); + assertEquals(2, nodes.size()); + + Map firstNode = (Map) nodes.get("successful_node_1"); + assertNotNull(firstNode); + Map firstNodeFlightServer = (Map) firstNode.get("flight_server"); + assertNotNull(firstNodeFlightServer); + Map firstNodePublishAddress = (Map) firstNodeFlightServer.get("publish_address"); + assertEquals("localhost", firstNodePublishAddress.get("host")); + assertEquals(47470, firstNodePublishAddress.get("port")); + + Map secondNode = (Map) nodes.get("successful_node_2"); + assertNotNull(secondNode); + Map secondNodeFlightServer = (Map) secondNode.get("flight_server"); + assertNotNull(secondNodeFlightServer); + Map secondNodePublishAddress = (Map) secondNodeFlightServer.get("publish_address"); + assertEquals("localhost", secondNodePublishAddress.get("host")); + assertEquals(47471, secondNodePublishAddress.get("port")); + + List> failuresList = (List>) responseMap.get("failures"); + assertNotNull("failures array should exist", failuresList); + assertEquals(2, failuresList.size()); + + Map firstFailure = failuresList.get(0); + assertEquals("failed_node_1", firstFailure.get("node_id")); + assertEquals("Connection refused", firstFailure.get("reason")); + + Map secondFailure = failuresList.get(1); + assertEquals("failed_node_2", secondFailure.get("node_id")); + assertEquals("Node not found", secondFailure.get("reason")); + } + } + + private static NodesFlightInfoResponse getNodesFlightInfoResponse() { + DiscoveryNode node1 = new DiscoveryNode( + "successful_node_1", + "successful_node_1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + Collections.emptyMap(), + Collections.emptySet(), + Version.CURRENT + ); + + List successfulNodes = getNodeFlightInfos(node1); + + return getNodesFlightInfoResponse(successfulNodes); + } + + private static NodesFlightInfoResponse getNodesFlightInfoResponse(List successfulNodes) { + List failures = Arrays.asList( + new FailedNodeException("failed_node_1", "Connection refused", new ConnectException("Connection refused")), + new FailedNodeException("failed_node_2", "Node not found", new Exception("Node not found")) + ); + + return new NodesFlightInfoResponse(new ClusterName("test-cluster"), successfulNodes, failures); + } + + private static List getNodeFlightInfos(DiscoveryNode node1) { + DiscoveryNode node2 = new DiscoveryNode( + "successful_node_2", + "successful_node_2", + new TransportAddress(InetAddress.getLoopbackAddress(), 9301), + Collections.emptyMap(), + Collections.emptySet(), + Version.CURRENT + ); + + TransportAddress address1 = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + return getNodeFlightInfos(node1, address1, node2); + } + + private static List getNodeFlightInfos(DiscoveryNode node1, TransportAddress address1, DiscoveryNode node2) { + BoundTransportAddress boundAddress1 = new BoundTransportAddress(new TransportAddress[] { address1 }, address1); + + TransportAddress address2 = new TransportAddress(InetAddress.getLoopbackAddress(), 47471); + BoundTransportAddress boundAddress2 = new BoundTransportAddress(new TransportAddress[] { address2 }, address2); + + return Arrays.asList(new NodeFlightInfo(node1, boundAddress1), new NodeFlightInfo(node2, boundAddress2)); + } + + public void testToXContentWithNoFailures() throws Exception { + NodesFlightInfoResponse response = getFlightInfoResponse(); + + XContentBuilder builder = 
JsonXContent.contentBuilder(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + Map responseMap = parser.map(); + + Map nodesStats = (Map) responseMap.get("_nodes"); + assertNotNull(nodesStats); + assertEquals(1, nodesStats.get("total")); + assertEquals(1, nodesStats.get("successful")); + assertEquals(0, nodesStats.get("failed")); + + assertEquals("test-cluster", responseMap.get("cluster_name")); + + Map nodes = (Map) responseMap.get("nodes"); + assertNotNull(nodes); + assertEquals(1, nodes.size()); + + assertNull("failures array should not exist", responseMap.get("failures")); + } + } + + private static NodesFlightInfoResponse getFlightInfoResponse() { + DiscoveryNode node = new DiscoveryNode( + "successful_node", + "successful_node", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + Collections.emptyMap(), + Collections.emptySet(), + Version.CURRENT + ); + + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + + return new NodesFlightInfoResponse( + new ClusterName("test-cluster"), + Collections.singletonList(new NodeFlightInfo(node, boundAddress)), + Collections.emptyList() + ); + } + + private DiscoveryNode createTestNode(String nodeId) { + return new DiscoveryNode( + nodeId, + nodeId, + "host" + nodeId, + "localhost", + "127.0.0.1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + new HashMap<>(), + new HashSet<>(), + Version.CURRENT + ); + } + + private NodeFlightInfo createNodeFlightInfo(DiscoveryNode node, int port) { + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), port); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + return new NodeFlightInfo(node, boundAddress); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoActionTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoActionTests.java new file mode 100644 index 0000000000000..d9d8af5920d61 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoActionTests.java @@ -0,0 +1,176 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.Version; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.arrow.flight.bootstrap.FlightService; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.junit.Before; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportNodesFlightInfoActionTests extends OpenSearchTestCase { + + private DiscoveryNode localNode; + private TransportNodesFlightInfoAction action; + private BoundTransportAddress boundAddress; + + @Before + public void setUp() throws Exception { + super.setUp(); + + localNode = new DiscoveryNode( + "local_node", + "local_node", + "host", + "localhost", + "127.0.0.1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + new HashMap<>(), + new HashSet<>(), + Version.CURRENT + ); + + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterName()).thenReturn(new ClusterName("test-cluster")); + when(clusterService.localNode()).thenReturn(localNode); + + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + + FlightService flightService = mock(FlightService.class); + when(flightService.getBoundAddress()).thenReturn(boundAddress); + + action = new TransportNodesFlightInfoAction( + Settings.EMPTY, + mock(ThreadPool.class), + clusterService, + mock(TransportService.class), + new ActionFilters(Collections.emptySet()), + flightService + ); + } + + public void testNewResponse() { + NodesFlightInfoRequest request = new NodesFlightInfoRequest(); + List nodeFlightInfos = Collections.singletonList(new NodeFlightInfo(localNode, boundAddress)); + List failures = Collections.emptyList(); + + NodesFlightInfoResponse response = action.newResponse(request, nodeFlightInfos, failures); + + assertNotNull(response); + assertEquals("test-cluster", response.getClusterName().value()); + assertEquals(1, response.getNodes().size()); + assertEquals(0, response.failures().size()); + + NodeFlightInfo nodeInfo = response.getNodes().get(0); + assertEquals(localNode, nodeInfo.getNode()); + assertEquals(boundAddress, nodeInfo.getBoundAddress()); + } + + public void testNewResponseWithFailures() { + NodesFlightInfoRequest request = new NodesFlightInfoRequest(); + List nodeFlightInfos = Collections.emptyList(); + List failures = Collections.singletonList(new FailedNodeException("failed_node", "test failure", null)); + + NodesFlightInfoResponse response = action.newResponse(request, nodeFlightInfos, failures); + + assertNotNull(response); + assertEquals("test-cluster", response.getClusterName().value()); + assertEquals(0, 
response.getNodes().size()); + assertEquals(1, response.failures().size()); + assertEquals("failed_node", response.failures().get(0).nodeId()); + assertEquals("test failure", response.failures().get(0).getMessage()); + } + + public void testNewNodeRequest() { + NodesFlightInfoRequest request = new NodesFlightInfoRequest("node1", "node2"); + NodesFlightInfoRequest.NodeFlightInfoRequest nodeRequest = action.newNodeRequest(request); + + assertNotNull(nodeRequest); + assertArrayEquals(new String[] { "node1", "node2" }, nodeRequest.request.nodesIds()); + } + + public void testNewNodeResponse() throws IOException { + NodeFlightInfo nodeInfo = new NodeFlightInfo(localNode, boundAddress); + BytesStreamOutput out = new BytesStreamOutput(); + nodeInfo.writeTo(out); + StreamInput in = out.bytes().streamInput(); + + NodeFlightInfo deserializedInfo = action.newNodeResponse(in); + + assertNotNull(deserializedInfo); + assertEquals(nodeInfo.getNode(), deserializedInfo.getNode()); + assertEquals(nodeInfo.getBoundAddress().publishAddress(), deserializedInfo.getBoundAddress().publishAddress()); + } + + public void testNodeOperation() { + NodesFlightInfoRequest.NodeFlightInfoRequest nodeRequest = new NodesFlightInfoRequest.NodeFlightInfoRequest( + new NodesFlightInfoRequest() + ); + + NodeFlightInfo response = action.nodeOperation(nodeRequest); + + assertNotNull(response); + assertEquals(localNode, response.getNode()); + assertEquals(boundAddress.publishAddress(), response.getBoundAddress().publishAddress()); + } + + public void testNodeOperationWithSpecificNodes() throws IOException { + NodesFlightInfoRequest request = new NodesFlightInfoRequest("local_node"); + NodesFlightInfoRequest.NodeFlightInfoRequest nodeRequest = new NodesFlightInfoRequest.NodeFlightInfoRequest(request); + + NodeFlightInfo response = action.nodeOperation(nodeRequest); + + assertNotNull(response); + assertEquals(localNode, response.getNode()); + assertEquals(boundAddress, response.getBoundAddress()); + } + + public void testNodeOperationWithInvalidNode() throws IOException { + NodesFlightInfoRequest request = new NodesFlightInfoRequest("invalid_node"); + NodesFlightInfoRequest.NodeFlightInfoRequest nodeRequest = new NodesFlightInfoRequest.NodeFlightInfoRequest(request); + + NodeFlightInfo response = action.nodeOperation(nodeRequest); + + assertNotNull(response); + assertEquals(localNode, response.getNode()); + assertEquals(boundAddress, response.getBoundAddress()); + } + + public void testSerialization() throws IOException { + NodesFlightInfoRequest request = new NodesFlightInfoRequest("node1", "node2"); + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput in = out.bytes().streamInput(); + NodesFlightInfoRequest deserializedRequest = new NodesFlightInfoRequest(in); + + assertArrayEquals(request.nodesIds(), deserializedRequest.nodesIds()); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java new file mode 100644 index 0000000000000..acc32d6b32f4c --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java @@ -0,0 +1,384 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.arrow.flight.bootstrap; + +import org.apache.arrow.flight.FlightClient; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.opensearch.Version; +import org.opensearch.arrow.flight.api.NodeFlightInfo; +import org.opensearch.arrow.flight.api.NodesFlightInfoAction; +import org.opensearch.arrow.flight.api.NodesFlightInfoRequest; +import org.opensearch.arrow.flight.api.NodesFlightInfoResponse; +import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.client.Client; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import io.netty.channel.EventLoopGroup; +import io.netty.util.NettyRuntime; + +import static org.opensearch.arrow.flight.bootstrap.FlightClientManager.LOCATION_TIMEOUT_MS; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.when; + +@SuppressWarnings("unchecked") +public class FlightClientManagerTests extends OpenSearchTestCase { + + private static BufferAllocator allocator; + private static EventLoopGroup elg; + private static ExecutorService executorService; + private static final AtomicInteger port = new AtomicInteger(0); + + private ClusterService clusterService; + private Client client; + private ClusterState state; + private FlightClientManager clientManager; + private ScheduledExecutorService locationUpdaterExecutor; + + @BeforeClass + public static void setupClass() throws Exception { + ServerConfig.init(Settings.EMPTY); + allocator = new RootAllocator(); + elg = ServerConfig.createELG("test-grpc-worker-elg", NettyRuntime.availableProcessors() * 2); + executorService = ServerConfig.createELG("test-grpc-worker", NettyRuntime.availableProcessors() * 2); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + locationUpdaterExecutor = Executors.newScheduledThreadPool(1); + + FeatureFlagSetter.set(FeatureFlags.ARROW_STREAMS_SETTING.getKey()); + clusterService = mock(ClusterService.class); + client = mock(Client.class); + state = 
getDefaultState(); + when(clusterService.state()).thenReturn(state); + + mockFlightInfoResponse(state.nodes(), 0); + + SslContextProvider sslContextProvider = null; + + ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.executor(ServerConfig.FLIGHT_CLIENT_THREAD_POOL_NAME)).thenReturn(executorService); + clientManager = new FlightClientManager(allocator, clusterService, sslContextProvider, elg, threadPool, client); + ClusterChangedEvent event = new ClusterChangedEvent("test", state, ClusterState.EMPTY_STATE); + clientManager.clusterChanged(event); + assertBusy(() -> { + assertEquals("Flight client isn't built in time limit", 2, clientManager.getFlightClients().size()); + assertNotNull("local_node should exist", clientManager.getFlightClient("local_node").get()); + assertNotNull("remote_node should exist", clientManager.getFlightClient("remote_node").get()); + }, 2, TimeUnit.SECONDS); + } + + private void mockFlightInfoResponse(DiscoveryNodes nodes, int sleepDuration) { + doAnswer(invocation -> { + locationUpdaterExecutor.schedule(() -> { + try { + NodesFlightInfoRequest request = invocation.getArgument(1); + ActionListener listener = invocation.getArgument(2); + + List nodeInfos = new ArrayList<>(); + for (DiscoveryNode node : nodes) { + if (request.nodesIds().length == 0 || Arrays.asList(request.nodesIds()).contains(node.getId())) { + int flightPort = getBaseStreamPort() + port.addAndGet(2); + TransportAddress address = new TransportAddress( + InetAddress.getByName(node.getAddress().getAddress()), + flightPort + ); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + NodeFlightInfo nodeInfo = new NodeFlightInfo(node, boundAddress); + nodeInfos.add(nodeInfo); + } + } + NodesFlightInfoResponse response = new NodesFlightInfoResponse(ClusterName.DEFAULT, nodeInfos, Collections.emptyList()); + listener.onResponse(response); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + }, sleepDuration, TimeUnit.MILLISECONDS); + return null; + }).when(client).execute(eq(NodesFlightInfoAction.INSTANCE), any(NodesFlightInfoRequest.class), any(ActionListener.class)); + + } + + @Override + public void tearDown() throws Exception { + locationUpdaterExecutor.shutdown(); + super.tearDown(); + clientManager.close(); + } + + private ClusterState getDefaultState() throws Exception { + int testPort = getBasePort() + port.addAndGet(2); + + DiscoveryNode localNode = createNode("local_node", "127.0.0.1", testPort); + DiscoveryNode remoteNode = createNode("remote_node", "127.0.0.2", testPort + 1); + + // Setup initial cluster state + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.add(remoteNode); + nodesBuilder.add(localNode); + nodesBuilder.localNodeId(localNode.getId()); + DiscoveryNodes nodes = nodesBuilder.build(); + + return ClusterState.builder(new ClusterName("test")).nodes(nodes).build(); + } + + private DiscoveryNode createNode(String nodeId, String host, int port) throws Exception { + TransportAddress address = new TransportAddress(InetAddress.getByName(host), port); + Map attributes = new HashMap<>(); + attributes.put("arrow.streams.enabled", "true"); + Set roles = Collections.singleton(DiscoveryNodeRole.DATA_ROLE); + return new DiscoveryNode(nodeId, address, attributes, roles, Version.CURRENT); + } + + @AfterClass + public static void tearClass() { + allocator.close(); + } + + public void testGetFlightClientForExistingNode() { + validateNodes(); + } + + public void 
testGetFlightClientForNonExistentNode() throws Exception { + assertFalse(clientManager.getFlightClient("non_existent_node").isPresent()); + } + + public void testClusterChangedWithNodesChanged() throws Exception { + DiscoveryNode newNode = createNode("new_node", "127.0.0.3", getBasePort() + port.addAndGet(1)); + DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(); + + for (DiscoveryNode node : state.nodes()) { + newNodesBuilder.add(node); + } + newNodesBuilder.localNodeId("local_node"); + // Update cluster state with new node + newNodesBuilder.add(newNode); + DiscoveryNodes newNodes = newNodesBuilder.build(); + + ClusterState newState = ClusterState.builder(new ClusterName("test")).nodes(newNodes).build(); + mockFlightInfoResponse(newNodes, 0); + when(clusterService.state()).thenReturn(newState); + clientManager.clusterChanged(new ClusterChangedEvent("test", newState, state)); + + for (DiscoveryNode node : newState.nodes()) { + assertBusy( + () -> { assertTrue("Flight client isn't built in time limit", clientManager.getFlightClient(node.getId()).isPresent()); }, + 2, + TimeUnit.SECONDS + ); + } + } + + public void testClusterChangedWithNoNodesChanged() throws Exception { + ClusterChangedEvent event = new ClusterChangedEvent("test", state, state); + clientManager.clusterChanged(event); + + // Verify original client still exists + for (DiscoveryNode node : state.nodes()) { + assertNotNull(clientManager.getFlightClient(node.getId()).get()); + } + } + + public void testGetLocalNodeId() throws Exception { + assertEquals("Local node ID should match", "local_node", clientManager.getLocalNodeId()); + } + + public void testCloseWithActiveClients() throws Exception { + for (DiscoveryNode node : state.nodes()) { + FlightClient client = clientManager.getFlightClient(node.getId()).get(); + assertNotNull(client); + } + + clientManager.close(); + assertEquals(0, clientManager.getFlightClients().size()); + } + + public void testIncompatibleNodeVersion() throws Exception { + Map attributes = new HashMap<>(); + attributes.put("arrow.streams.enabled", "true"); + DiscoveryNode oldVersionNode = new DiscoveryNode( + "old_version_node", + new TransportAddress(InetAddress.getByName("127.0.0.3"), getBasePort() + port.addAndGet(1)), + attributes, + Collections.singleton(DiscoveryNodeRole.DATA_ROLE), + Version.fromString("2.18.0") // Version before Arrow Flight introduction + ); + + // Update cluster state with old version node + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.add(oldVersionNode); + nodesBuilder.localNodeId("local_node"); + DiscoveryNodes nodes = nodesBuilder.build(); + ClusterState oldVersionState = ClusterState.builder(new ClusterName("test")).nodes(nodes).build(); + + when(clusterService.state()).thenReturn(oldVersionState); + mockFlightInfoResponse(nodes, 0); + + assertFalse(clientManager.getFlightClient(oldVersionNode.getId()).isPresent()); + } + + public void testGetFlightClientLocationTimeout() throws Exception { + reset(client); + + String nodeId = "test_node"; + DiscoveryNode testNode = createNode(nodeId, "127.0.0.1", getBasePort() + port.addAndGet(2)); + + // Update cluster state with the test node + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.add(testNode); + nodesBuilder.localNodeId(nodeId); + ClusterState newState = ClusterState.builder(new ClusterName("test")).nodes(nodesBuilder.build()).build(); + when(clusterService.state()).thenReturn(newState); + // Mock a delayed response that will cause timeout + 
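// (the stubbed response arrives 100 ms after LOCATION_TIMEOUT_MS, so the location lookup below gives up first)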
mockFlightInfoResponse(newState.nodes(), LOCATION_TIMEOUT_MS + 100); + + ClusterChangedEvent event = new ClusterChangedEvent("test", newState, ClusterState.EMPTY_STATE); + clientManager.clusterChanged(event); + assertFalse(clientManager.getFlightClient(nodeId).isPresent()); + } + + public void testGetFlightClientLocationExecutionError() throws Exception { + reset(client); + + String nodeId = "test_node"; + DiscoveryNode testNode = createNode(nodeId, "127.0.0.1", getBasePort() + port.addAndGet(2)); + + // Update cluster state with the test node + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.add(testNode); + nodesBuilder.localNodeId(nodeId); + ClusterState newState = ClusterState.builder(new ClusterName("test")).nodes(nodesBuilder.build()).build(); + + when(clusterService.state()).thenReturn(newState); + + // Mock failure + doAnswer(invocation -> { + ActionListener listener = invocation.getArgument(2); + listener.onFailure(new RuntimeException("Test execution error")); + return null; + }).when(client).execute(eq(NodesFlightInfoAction.INSTANCE), any(NodesFlightInfoRequest.class), any(ActionListener.class)); + + ClusterChangedEvent event = new ClusterChangedEvent("test", newState, ClusterState.EMPTY_STATE); + clientManager.clusterChanged(event); + + assertFalse(clientManager.getFlightClient(nodeId).isPresent()); + } + + public void testFailedClusterUpdateButSuccessfulDirectRequest() throws Exception { + reset(client); + + String nodeId = "test_node"; + DiscoveryNode testNode = createNode(nodeId, "127.0.0.1", getBasePort() + port.addAndGet(2)); + + // Update cluster state with the test node + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.add(testNode); + nodesBuilder.localNodeId(nodeId); + ClusterState newState = ClusterState.builder(new ClusterName("test")).nodes(nodesBuilder.build()).build(); + + when(clusterService.state()).thenReturn(newState); + + // First mock call fails during cluster update + AtomicBoolean firstCall = new AtomicBoolean(true); + doAnswer(invocation -> { + locationUpdaterExecutor.schedule(() -> { + ActionListener listener = invocation.getArgument(2); + if (firstCall.getAndSet(false)) { + // Fail on first call (during cluster update) + listener.onFailure(new RuntimeException("Failed during cluster update")); + } else { + // Succeed on second call (direct request) + try { + NodesFlightInfoRequest request = invocation.getArgument(1); + List nodeInfos = new ArrayList<>(); + for (DiscoveryNode node : newState.nodes()) { + if (request.nodesIds().length == 0 || Arrays.asList(request.nodesIds()).contains(node.getId())) { + int flightPort = getBaseStreamPort() + port.addAndGet(2); + TransportAddress address = new TransportAddress( + InetAddress.getByName(node.getAddress().getAddress()), + flightPort + ); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + NodeFlightInfo nodeInfo = new NodeFlightInfo(node, boundAddress); + nodeInfos.add(nodeInfo); + } + } + NodesFlightInfoResponse response = new NodesFlightInfoResponse( + ClusterName.DEFAULT, + nodeInfos, + Collections.emptyList() + ); + listener.onResponse(response); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } + }, 0, TimeUnit.MICROSECONDS); + return null; + }).when(client).execute(eq(NodesFlightInfoAction.INSTANCE), any(NodesFlightInfoRequest.class), any(ActionListener.class)); + + ClusterChangedEvent event = new ClusterChangedEvent("test", newState, 
ClusterState.EMPTY_STATE); + clientManager.clusterChanged(event); + + // Verify that the client can still be created successfully on direct request + clientManager.buildClientAsync(nodeId); + assertBusy(() -> { + assertTrue("Flight client should be created successfully on direct request", clientManager.getFlightClient(nodeId).isPresent()); + }, 2, TimeUnit.SECONDS); + assertFalse("first call should be invoked", firstCall.get()); + } + + private void validateNodes() { + for (DiscoveryNode node : state.nodes()) { + FlightClient client = clientManager.getFlightClient(node.getId()).get(); + assertNotNull("Flight client should be created for existing node", client); + } + } + + protected static int getBaseStreamPort() { + return getBasePort(9401); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java new file mode 100644 index 0000000000000..fa20535384557 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.arrow.flight.bootstrap; + +import org.opensearch.Version; +import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.client.Client; + +import java.net.InetAddress; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class FlightServiceTests extends OpenSearchTestCase { + + private Settings settings; + private ClusterService clusterService; + private NetworkService networkService; + private ThreadPool threadPool; + private final AtomicInteger port = new AtomicInteger(0); + private DiscoveryNode localNode; + + @Override + public void setUp() throws Exception { + super.setUp(); + FeatureFlagSetter.set(FeatureFlags.ARROW_STREAMS_SETTING.getKey()); + int availablePort = getBasePort(9500) + port.addAndGet(1); + settings = Settings.EMPTY; + localNode = createNode(availablePort); + + // Setup initial cluster state + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.localNodeId(localNode.getId()); + nodesBuilder.add(localNode); + DiscoveryNodes nodes = nodesBuilder.build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(nodes).build(); + clusterService = mock(ClusterService.class); + 
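+ // hand the single-node cluster state assembled above back to anything that asks +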
when(clusterService.state()).thenReturn(clusterState); + + threadPool = mock(ThreadPool.class); + when(threadPool.executor(ServerConfig.FLIGHT_SERVER_THREAD_POOL_NAME)).thenReturn(mock(ExecutorService.class)); + when(threadPool.executor(ServerConfig.FLIGHT_CLIENT_THREAD_POOL_NAME)).thenReturn(mock(ExecutorService.class)); + networkService = new NetworkService(Collections.emptyList()); + } + + public void testInitializeWithSslDisabled() throws Exception { + + Settings noSslSettings = Settings.builder().put("arrow.ssl.enable", false).build(); + + try (FlightService noSslService = new FlightService(noSslSettings)) { + noSslService.setClusterService(clusterService); + noSslService.setThreadPool(threadPool); + noSslService.setClient(mock(Client.class)); + noSslService.setNetworkService(networkService); + noSslService.start(); + SslContextProvider sslContextProvider = noSslService.getSslContextProvider(); + assertNull("SSL context provider should be null", sslContextProvider); + assertNotNull(noSslService.getFlightClientManager()); + assertNotNull(noSslService.getBoundAddress()); + } + } + + public void testStartAndStop() throws Exception { + try (FlightService testService = new FlightService(Settings.EMPTY)) { + testService.setClusterService(clusterService); + testService.setThreadPool(threadPool); + testService.setClient(mock(Client.class)); + testService.setNetworkService(networkService); + testService.start(); + testService.stop(); + testService.start(); + assertNull(testService.getStreamManager()); + } + } + + public void testInitializeWithoutSecureTransportSettingsProvider() { + Settings sslSettings = Settings.builder().put(settings).put("arrow.ssl.enable", true).build(); + + try (FlightService sslService = new FlightService(sslSettings)) { + // Should throw exception when initializing without provider + expectThrows(RuntimeException.class, () -> { + sslService.setClusterService(clusterService); + sslService.setThreadPool(threadPool); + sslService.setClient(mock(Client.class)); + sslService.setNetworkService(networkService); + sslService.start(); + }); + } + } + + public void testServerStartupFailure() { + Settings invalidSettings = Settings.builder() + .put(ServerComponents.SETTING_FLIGHT_PUBLISH_PORT.getKey(), "-100") // Invalid port + .build(); + try (FlightService invalidService = new FlightService(invalidSettings)) { + invalidService.setClusterService(clusterService); + invalidService.setThreadPool(threadPool); + invalidService.setClient(mock(Client.class)); + invalidService.setNetworkService(networkService); + expectThrows(RuntimeException.class, () -> { invalidService.doStart(); }); + } + } + + public void testLifecycleStateTransitions() throws Exception { + // Find new port for this test + try (FlightService testService = new FlightService(Settings.EMPTY)) { + testService.setClusterService(clusterService); + testService.setThreadPool(threadPool); + testService.setClient(mock(Client.class)); + testService.setNetworkService(networkService); + // Test all state transitions + testService.start(); + assertEquals("STARTED", testService.lifecycleState().toString()); + + testService.stop(); + assertEquals("STOPPED", testService.lifecycleState().toString()); + + testService.close(); + assertEquals("CLOSED", testService.lifecycleState().toString()); + } + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + } + + private DiscoveryNode createNode(int port) throws Exception { + TransportAddress address = new TransportAddress(InetAddress.getByName("127.0.0.1"), 
port); + Map<String, String> attributes = new HashMap<>(); + attributes.put("arrow.streams.enabled", "true"); + + Set<DiscoveryNodeRole> roles = Collections.singleton(DiscoveryNodeRole.DATA_ROLE); + return new DiscoveryNode("local_node", address, attributes, roles, Version.CURRENT); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/ServerConfigTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/ServerConfigTests.java new file mode 100644 index 0000000000000..9419e26318046 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/ServerConfigTests.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.arrow.flight.bootstrap; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ScalingExecutorBuilder; + +import static org.opensearch.arrow.flight.bootstrap.ServerComponents.SETTING_FLIGHT_PUBLISH_PORT; + +public class ServerConfigTests extends OpenSearchTestCase { + + private Settings settings; + + @Override + public void setUp() throws Exception { + super.setUp(); + settings = Settings.builder() + .put("arrow.allocation.manager.type", "Netty") + .put("arrow.enable_null_check_for_get", false) + .put("arrow.enable_unsafe_memory_access", true) + .put("arrow.memory.debug.allocator", false) + .put("arrow.ssl.enable", true) + .put("thread_pool.flight-server.min", 1) + .put("thread_pool.flight-server.max", 4) + .put("thread_pool.flight-server.keep_alive", TimeValue.timeValueMinutes(5)) + .build(); + } + + public void testInit() { + ServerConfig.init(settings); + + // Verify system properties are set correctly + assertEquals("Netty", System.getProperty("arrow.allocation.manager.type")); + assertEquals("false", System.getProperty("arrow.enable_null_check_for_get")); + assertEquals("true", System.getProperty("arrow.enable_unsafe_memory_access")); + assertEquals("false", System.getProperty("arrow.memory.debug.allocator")); + + // Verify SSL settings + assertTrue(ServerConfig.isSslEnabled()); + + ScalingExecutorBuilder executorBuilder = ServerConfig.getServerExecutorBuilder(); + assertNotNull(executorBuilder); + assertEquals(3, executorBuilder.getRegisteredSettings().size()); + assertEquals(1, executorBuilder.getRegisteredSettings().get(0).get(settings)); // min + assertEquals(4, executorBuilder.getRegisteredSettings().get(1).get(settings)); // max + assertEquals(TimeValue.timeValueMinutes(5), executorBuilder.getRegisteredSettings().get(2).get(settings)); // keep alive + } + + public void testGetSettings() { + var settings = ServerConfig.getSettings(); + assertNotNull(settings); + assertFalse(settings.isEmpty()); + + assertTrue(settings.contains(ServerConfig.ARROW_ALLOCATION_MANAGER_TYPE)); + assertTrue(settings.contains(ServerConfig.ARROW_ENABLE_NULL_CHECK_FOR_GET)); + assertTrue(settings.contains(ServerConfig.ARROW_ENABLE_UNSAFE_MEMORY_ACCESS)); + assertTrue(settings.contains(ServerConfig.ARROW_ENABLE_DEBUG_ALLOCATOR)); + assertTrue(settings.contains(ServerConfig.ARROW_SSL_ENABLE)); + } + + public void testDefaultSettings() { + Settings defaultSettings = Settings.EMPTY; + ServerConfig.init(defaultSettings); + + // Verify default values + assertEquals(-1,
SETTING_FLIGHT_PUBLISH_PORT.get(defaultSettings).intValue()); + assertEquals("Netty", ServerConfig.ARROW_ALLOCATION_MANAGER_TYPE.get(defaultSettings)); + assertFalse(ServerConfig.ARROW_ENABLE_NULL_CHECK_FOR_GET.get(defaultSettings)); + assertTrue(ServerConfig.ARROW_ENABLE_UNSAFE_MEMORY_ACCESS.get(defaultSettings)); + assertFalse(ServerConfig.ARROW_ENABLE_DEBUG_ALLOCATOR.get(defaultSettings)); + assertFalse(ServerConfig.ARROW_SSL_ENABLE.get(defaultSettings)); + } +} diff --git a/server/build.gradle b/server/build.gradle index e1512fb4b2c58..cb64d6becb315 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -69,6 +69,7 @@ dependencies { api project(":libs:opensearch-geo") api project(":libs:opensearch-telemetry") api project(":libs:opensearch-task-commons") + implementation project(':libs:opensearch-arrow-spi') compileOnly project(':libs:opensearch-plugin-classloader') testRuntimeOnly project(':libs:opensearch-plugin-classloader') diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index 59d999798868e..6753bb8eac083 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -39,6 +39,7 @@ protected FeatureFlagSettings( FeatureFlags.STAR_TREE_INDEX_SETTING, FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING, - FeatureFlags.TERM_VERSION_PRECOMMIT_ENABLE_SETTING + FeatureFlags.TERM_VERSION_PRECOMMIT_ENABLE_SETTING, + FeatureFlags.ARROW_STREAMS_SETTING ); } diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 6df68013a8119..4be45aed70023 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -128,6 +128,9 @@ public class FeatureFlags { Property.NodeScope ); + public static final String ARROW_STREAMS = "opensearch.experimental.feature.arrow.streams.enabled"; + public static final Setting<Boolean> ARROW_STREAMS_SETTING = Setting.boolSetting(ARROW_STREAMS, false, Property.NodeScope); + private static final List<Setting<Boolean>> ALL_FEATURE_FLAG_SETTINGS = List.of( REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, EXTENSIONS_SETTING, @@ -138,7 +141,8 @@ public class FeatureFlags { STAR_TREE_INDEX_SETTING, APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, READER_WRITER_SPLIT_EXPERIMENTAL_SETTING, - TERM_VERSION_PRECOMMIT_ENABLE_SETTING + TERM_VERSION_PRECOMMIT_ENABLE_SETTING, + ARROW_STREAMS_SETTING ); /** diff --git a/server/src/main/java/org/opensearch/plugins/DefaultSecureTransportParameters.java b/server/src/main/java/org/opensearch/plugins/DefaultSecureTransportParameters.java index e3771f224a7db..3265c582dba76 100644 --- a/server/src/main/java/org/opensearch/plugins/DefaultSecureTransportParameters.java +++ b/server/src/main/java/org/opensearch/plugins/DefaultSecureTransportParameters.java @@ -11,6 +11,13 @@ import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.TrustManagerFactory; + +import java.util.Collection; +import java.util.List; +import java.util.Optional; + /** * Default implementation of {@link SecureTransportSettingsProvider.SecureTransportParameters}.
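 * All of the optional SSL parameters introduced below intentionally default to empty values, so transports keep their current behaviour unless a security plugin supplies concrete factories.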
*/ @@ -25,4 +32,34 @@ class DefaultSecureTransportParameters implements SecureTransportSettingsProvide public boolean dualModeEnabled() { return NetworkModule.TRANSPORT_SSL_DUAL_MODE_ENABLED.get(settings); } + + @Override + public Optional<KeyManagerFactory> keyManagerFactory() { + return Optional.empty(); + } + + @Override + public Optional<String> sslProvider() { + return Optional.empty(); + } + + @Override + public Optional<String> clientAuth() { + return Optional.empty(); + } + + @Override + public Collection<String> protocols() { + return List.of(); + } + + @Override + public Collection<String> cipherSuites() { + return List.of(); + } + + @Override + public Optional<TrustManagerFactory> trustManagerFactory() { + return Optional.empty(); + } } diff --git a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java index 5f9e1a952b6e8..f4cf64c16cbd2 100644 --- a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java +++ b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java @@ -13,8 +13,10 @@ import org.opensearch.transport.Transport; import org.opensearch.transport.TransportAdapterProvider; +import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLException; +import javax.net.ssl.TrustManagerFactory; import java.util.Collection; import java.util.Collections; @@ -52,6 +54,18 @@ default Optional<SecureTransportParameters> parameters(Settings settings) { @ExperimentalApi interface SecureTransportParameters { boolean dualModeEnabled(); + + Optional<KeyManagerFactory> keyManagerFactory(); + + Optional<String> sslProvider(); + + Optional<String> clientAuth(); + + Collection<String> protocols(); + + Collection<String> cipherSuites(); + + Optional<TrustManagerFactory> trustManagerFactory(); } /** diff --git a/server/src/main/java/org/opensearch/plugins/StreamManagerPlugin.java b/server/src/main/java/org/opensearch/plugins/StreamManagerPlugin.java new file mode 100644 index 0000000000000..60bdb789b3750 --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/StreamManagerPlugin.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.arrow.spi.StreamManager; + +import java.util.function.Supplier; + +/** + * An interface for OpenSearch plugins to implement to provide a StreamManager. + * Plugins can implement this interface to provide a custom StreamManager implementation. + * @see StreamManager + */ +public interface StreamManagerPlugin { + /** + * Returns the StreamManager instance for this plugin. + * + * @return The StreamManager instance + */ + Supplier<StreamManager> getStreamManager(); +} diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index 052b1a4e52eb9..0bd5d8afda91e 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -1768,7 +1768,7 @@ public static String getPortRange() { return getBasePort() + "-" + (getBasePort() + 99); // upper bound is inclusive } - protected static int getBasePort() { + protected static int getBasePort(int start) { // some tests use MockTransportService to do network based testing.
Yet, we run tests in multiple JVMs that means // concurrent tests could claim port that another JVM just released and if that test tries to simulate a disconnect it might // be smart enough to re-connect depending on what is tested. To reduce the risk, since this is very hard to debug we use @@ -1792,7 +1792,11 @@ protected static int getBasePort() { startAt = (int) Math.floorMod(workerId - 1, 223L) + 1; } assert startAt >= 0 : "Unexpected test worker Id, resulting port range would be negative"; - return 10300 + (startAt * 100); + return start + (startAt * 100); + } + + protected static int getBasePort() { + return getBasePort(10300); } protected static InetAddress randomIp(boolean v4) { From ca8e4f871a1cfec70985aa6acea33f0c36484be5 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 21 Feb 2025 07:42:30 -0500 Subject: [PATCH 11/48] HTTP API calls hang with 'Accept-Encoding: zstd' (#17408) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../netty4/Netty4HttpServerTransport.java | 75 ++++++++++++++++++- .../Netty4HttpServerTransportTests.java | 5 +- 3 files changed, 77 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab4138c452894..1ad53194361fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) - Add highlighting for wildcard search on `match_only_text` field ([#17101](https://github.com/opensearch-project/OpenSearch/pull/17101)) - Fix illegal argument exception when creating a PIT ([#16781](https://github.com/opensearch-project/OpenSearch/pull/16781)) +- Fix HTTP API calls that hang with 'Accept-Encoding: zstd' ([#17408](https://github.com/opensearch-project/OpenSearch/pull/17408)) ### Security diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index 4970c42163ac3..7e2f3496e5c01 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -61,6 +61,8 @@ import java.net.InetSocketAddress; import java.net.SocketOption; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.TimeUnit; import io.netty.bootstrap.ServerBootstrap; @@ -77,6 +79,12 @@ import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.socket.nio.NioChannelOption; import io.netty.handler.codec.ByteToMessageDecoder; +import io.netty.handler.codec.compression.Brotli; +import io.netty.handler.codec.compression.CompressionOptions; +import io.netty.handler.codec.compression.DeflateOptions; +import io.netty.handler.codec.compression.GzipOptions; +import io.netty.handler.codec.compression.StandardCompressionOptions; +import io.netty.handler.codec.compression.ZstdEncoder; import io.netty.handler.codec.http.HttpContentCompressor; import io.netty.handler.codec.http.HttpContentDecompressor; import io.netty.handler.codec.http.HttpMessage; @@ -440,7 +448,7 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpMessage msg) throws E pipeline.addAfter( "aggregator", "encoder_compress", - new HttpContentCompressor(handlingSettings.getCompressionLevel()) + new 
HttpContentCompressor(defaultCompressionOptions(handlingSettings.getCompressionLevel())) ); } pipeline.addBefore("handler", "request_creator", requestCreator); @@ -467,7 +475,10 @@ protected void configureDefaultHttpPipeline(ChannelPipeline pipeline) { aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); pipeline.addLast("aggregator", aggregator); if (handlingSettings.isCompression()) { - pipeline.addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + pipeline.addLast( + "encoder_compress", + new HttpContentCompressor(defaultCompressionOptions(handlingSettings.getCompressionLevel())) + ); } pipeline.addLast("request_creator", requestCreator); pipeline.addLast("response_creator", responseCreator); @@ -512,7 +523,10 @@ protected void initChannel(Channel childChannel) throws Exception { if (handlingSettings.isCompression()) { childChannel.pipeline() - .addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + .addLast( + "encoder_compress", + new HttpContentCompressor(defaultCompressionOptions(handlingSettings.getCompressionLevel())) + ); } childChannel.pipeline() @@ -563,4 +577,59 @@ protected ChannelInboundHandlerAdapter createHeaderVerifier() { protected ChannelInboundHandlerAdapter createDecompressor() { return new HttpContentDecompressor(); } + + /** + * Copy of {@link HttpContentCompressor} default compression options with ZSTD excluded: + * although zstd-jni is on the classpath, {@link ZstdEncoder} requires direct buffers support + * which by default {@link NettyAllocator} does not provide. + * + * @param compressionLevel + * {@code 1} yields the fastest compression and {@code 9} yields the + * best compression. {@code 0} means no compression. The default + * compression level is {@code 6}. + * + * @return default compression options + */ + private static CompressionOptions[] defaultCompressionOptions(int compressionLevel) { + return defaultCompressionOptions(compressionLevel, 15, 8); + } + + /** + * Copy of {@link HttpContentCompressor} default compression options with ZSTD excluded: + * although zstd-jni is on the classpath, {@link ZstdEncoder} requires direct buffers support + * which by default {@link NettyAllocator} does not provide. + * + * @param compressionLevel + * {@code 1} yields the fastest compression and {@code 9} yields the + * best compression. {@code 0} means no compression. The default + * compression level is {@code 6}. + * @param windowBits + * The base two logarithm of the size of the history buffer. The + * value should be in the range {@code 9} to {@code 15} inclusive. + * Larger values result in better compression at the expense of + * memory usage. The default value is {@code 15}. + * @param memLevel + * How much memory should be allocated for the internal compression + * state. {@code 1} uses minimum memory and {@code 9} uses maximum + * memory. Larger values result in better and faster compression + * at the expense of memory usage. 
The default value is {@code 8}. + * + * @return default compression options + */ + private static CompressionOptions[] defaultCompressionOptions(int compressionLevel, int windowBits, int memLevel) { + final List<CompressionOptions> options = new ArrayList<>(4); + final GzipOptions gzipOptions = StandardCompressionOptions.gzip(compressionLevel, windowBits, memLevel); + final DeflateOptions deflateOptions = StandardCompressionOptions.deflate(compressionLevel, windowBits, memLevel); + + options.add(gzipOptions); + options.add(deflateOptions); + options.add(StandardCompressionOptions.snappy()); + + if (Brotli.isAvailable()) { + options.add(StandardCompressionOptions.brotli()); + } + + return options.toArray(new CompressionOptions[0]); + } + } diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java index d892918decfb5..05cd7c9fd90d3 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java @@ -393,7 +393,10 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th try (Netty4HttpClient client = Netty4HttpClient.http()) { DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); - request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip")); + // ZSTD is not supported at the moment by NettyAllocator (needs direct buffers), + // and Brotli is not on the classpath. + final String contentEncoding = randomFrom("deflate", "gzip", "snappy", "br", "zstd"); + request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, contentEncoding); long numOfHugeAllocations = getHugeAllocationCount(); final FullHttpResponse response = client.send(remoteAddress.address(), request); try { From 664f254b67b9bd31eab389fcf9b81c1b761d49f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Feb 2025 08:01:06 -0500 Subject: [PATCH 12/48] Bump net.minidev:json-smart from 2.5.1 to 2.5.2 in /plugins/repository-hdfs (#17376) * Bump net.minidev:json-smart in /plugins/repository-hdfs Bumps [net.minidev:json-smart](https://github.com/netplex/json-smart-v2) from 2.5.1 to 2.5.2. - [Release notes](https://github.com/netplex/json-smart-v2/releases) - [Commits](https://github.com/netplex/json-smart-v2/compare/2.5.1...2.5.2) --- updated-dependencies: - dependency-name: net.minidev:json-smart dependency-type: direct:production update-type: version-update:semver-patch ...
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1 | 1 - plugins/repository-hdfs/licenses/json-smart-2.5.2.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/json-smart-2.5.2.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index cf76c88c5482e..4cfb572929f54 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -81,7 +81,7 @@ dependencies { api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" api "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" - api 'net.minidev:json-smart:2.5.1' + api 'net.minidev:json-smart:2.5.2' api "io.netty:netty-all:${versions.netty}" implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" implementation 'org.codehaus.woodstox:stax2-api:4.2.2' diff --git a/plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1 b/plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1 deleted file mode 100644 index fe23968afce1e..0000000000000 --- a/plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c11d2808d009132dfbbf947ebf37de6bf266c8e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/json-smart-2.5.2.jar.sha1 b/plugins/repository-hdfs/licenses/json-smart-2.5.2.jar.sha1 new file mode 100644 index 0000000000000..97fc7b94f0fd2 --- /dev/null +++ b/plugins/repository-hdfs/licenses/json-smart-2.5.2.jar.sha1 @@ -0,0 +1 @@ +95d166b18f95907be0f46cdb9e1c0695eed03387 \ No newline at end of file From 84477373f70247774f1bd52ad708c393da7f7d7c Mon Sep 17 00:00:00 2001 From: kkewwei Date: Sat, 22 Feb 2025 03:29:50 +0800 Subject: [PATCH 13/48] Fix missing bucket in terms aggregation with missing value (#17418) Signed-off-by: kkewwei Signed-off-by: kkewwei --- CHANGELOG-3.0.md | 1 + .../aggregations/support/MissingValues.java | 2 +- .../bucket/terms/TermsAggregatorTests.java | 94 +++++++++++++++++++ 3 files changed, 96 insertions(+), 1 deletion(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index fc2fcd361f497..58e5e5cca3acb 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -70,6 +70,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) - Fix swapped field formats in nodes API where `total_indexing_buffer_in_bytes` and `total_indexing_buffer` values were reversed ([#17070](https://github.com/opensearch-project/OpenSearch/pull/17070)) - Add HTTP/2 protocol support to HttpRequest.HttpVersion ([#17248](https://github.com/opensearch-project/OpenSearch/pull/17248)) +- Fix missing bucket in terms aggregation with missing value ([#17418](https://github.com/opensearch-project/OpenSearch/pull/17418)) ### Security diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java index a5c685a0930e2..429a543281c76 100644 --- 
a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java @@ -359,7 +359,7 @@ public long getValueCount() { @Override public int docValueCount() { - return values.docValueCount(); + return Math.max(1, values.docValueCount()); } @Override diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index bc22d5f6ef2e8..e59b28d0a51ff 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.document.NumericDocValuesField; @@ -42,6 +43,8 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.NoMergePolicy; @@ -75,6 +78,8 @@ import org.opensearch.index.mapper.RangeFieldMapper; import org.opensearch.index.mapper.RangeType; import org.opensearch.index.mapper.SeqNoFieldMapper; +import org.opensearch.index.mapper.TextFieldMapper; +import org.opensearch.index.mapper.TextParams; import org.opensearch.index.mapper.Uid; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -1578,6 +1583,95 @@ public void testOrderByPipelineAggregation() throws Exception { } } + public void testBucketInTermsAggregationWithMissingValue() throws IOException { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + // test text + { + FieldType type = TextParams.buildFieldType(() -> true, () -> false, () -> "positions", () -> false, () -> "no"); + Document document = new Document(); + document.add(new Field("mv_field", "name1", type)); + document.add(new Field("mv_field", "name2", type)); + indexWriter.addDocument(document); + document = new Document(); + document.add(new Field("mv_field1", "value1", type)); + indexWriter.addDocument(document); + document = new Document(); + document.add(new Field("mv_field1", "value2", type)); + indexWriter.addDocument(document); + indexWriter.flush(); + try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { + IndexSearcher indexSearcher = newIndexSearcher(indexReader); + TextFieldMapper.TextFieldType fieldType = new TextFieldMapper.TextFieldType("mv_field"); + fieldType.setFielddata(true); + + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("nick").userValueTypeHint(ValueType.STRING) + .field("mv_field") + .missing("no_nickname"); + TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); + + aggregator.preCollection(); + indexSearcher.search(new 
MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + Terms result = reduce(aggregator); + assertEquals(3, result.getBuckets().size()); + assertEquals("no_nickname", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("name1", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertEquals("name2", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + + } + indexWriter.deleteAll(); + } + + // test keyword + { + FieldType fieldtype = new FieldType(KeywordFieldMapper.Defaults.FIELD_TYPE); + fieldtype.setDocValuesType(DocValuesType.SORTED_SET); + fieldtype.setIndexOptions(IndexOptions.NONE); + fieldtype.setStored(true); + + Document document = new Document(); + document.add(new SortedSetDocValuesField("mv_field1", new BytesRef("name1"))); + document.add(new SortedSetDocValuesField("mv_field1", new BytesRef("name2"))); + indexWriter.addDocument(document); + document = new Document(); + document.add(new SortedSetDocValuesField("mv_field2", new BytesRef("value1"))); + indexWriter.addDocument(document); + document = new Document(); + document.add(new SortedSetDocValuesField("mv_field2", new BytesRef("value2"))); + indexWriter.addDocument(document); + indexWriter.flush(); + try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { + IndexSearcher indexSearcher = newIndexSearcher(indexReader); + KeywordFieldMapper.KeywordFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("mv_field1"); + + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint( + ValueType.STRING + ).field("mv_field1").missing("no_nickname1"); + TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); + + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + Terms result = reduce(aggregator); + assertEquals(3, result.getBuckets().size()); + assertEquals("no_nickname1", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("name1", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertEquals("name2", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + } + } + } + } + } + private final SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); private List generateDocsWithNested(String id, int value, int[] nestedValues) { From 4bd1323782ad53b9bbb999ffa9616365701a0865 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Sun, 23 Feb 2025 12:26:29 -0500 Subject: [PATCH 14/48] Update Andriy Redko (https://github.com/reta) affiliation (#17430) Signed-off-by: Andriy Redko --- MAINTAINERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 68d8543ee2725..8a6890d1ca1c1 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -8,7 +8,7 @@ This document contains a list of maintainers in this repo. 
See [opensearch-proje |--------------------------|---------------------------------------------------------|-------------| | Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | | Andrew Ross | [andrross](https://github.com/andrross) | Amazon | -| Andriy Redko | [reta](https://github.com/reta) | Aiven | +| Andriy Redko | [reta](https://github.com/reta) | Independent | | Ankit Jain | [jainankitk](https://github.com/jainankitk) | Amazon | | Ashish Singh | [ashking94](https://github.com/ashking94) | Amazon | | Bukhtawar Khan | [Bukhtawar](https://github.com/Bukhtawar) | Amazon | From e7ac072875b8ca39519eb6cefb110ff88fec6d6b Mon Sep 17 00:00:00 2001 From: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com> Date: Mon, 24 Feb 2025 00:29:39 +0530 Subject: [PATCH 15/48] Add systemd configurations to strengthen OS core security (#17107) * Add systemd configurations to strengthen OS core security Signed-off-by: Rajat Gupta * Add systemd template unit file Signed-off-by: Rajat Gupta * Update CHANGELOG-3.0.md Signed-off-by: Rajat Gupta * Revert "Add systemd configurations to strengthen OS core security" This reverts commit 71b2584ecbdce4bd3aa9328d8d562d5a7028e5c8. Signed-off-by: Rajat Gupta * Remove SocketBind Directives and template unit file Signed-off-by: Rajat Gupta * Minor fixes Signed-off-by: Rajat Gupta * Modify systemd unit file in core to be in sync with distribution unit file Signed-off-by: Rajat Gupta * Modify systemd env file to be in sync with opensearch-build Signed-off-by: Rajat Gupta --------- Signed-off-by: Rajat Gupta Signed-off-by: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com> Co-authored-by: Rajat Gupta --- CHANGELOG-3.0.md | 1 + .../packages/src/common/env/opensearch | 15 +-- .../src/common/systemd/opensearch.service | 111 +++++++++++++++++- 3 files changed, 114 insertions(+), 13 deletions(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 58e5e5cca3acb..9bb8d528a6efb 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [WLM] Add WLM support for search scroll API ([#16981](https://github.com/opensearch-project/OpenSearch/pull/16981)) - Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) 
([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) - Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) +- Add systemd configurations to strengthen OS core security ([#17107](https://github.com/opensearch-project/OpenSearch/pull/17107)) - Added pull-based Ingestion (APIs, for ingestion source, a Kafka plugin, and IngestionEngine that pulls data from the ingestion source) ([#16958](https://github.com/opensearch-project/OpenSearch/pull/16958)) - Added ConfigurationUtils to core for the ease of configuration parsing [#17223](https://github.com/opensearch-project/OpenSearch/pull/17223) - Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) diff --git a/distribution/packages/src/common/env/opensearch b/distribution/packages/src/common/env/opensearch index 198bcfde90c4c..a8b6829766924 100644 --- a/distribution/packages/src/common/env/opensearch +++ b/distribution/packages/src/common/env/opensearch @@ -3,17 +3,17 @@ ################################ # OpenSearch home directory -#OPENSEARCH_HOME=/usr/share/opensearch +OPENSEARCH_HOME=/usr/share/opensearch # OpenSearch Java path -#OPENSEARCH_JAVA_HOME= +#OPENSEARCH_JAVA_HOME=/usr/lib/jvm/java-11-amazon-corretto # OpenSearch configuration directory # Note: this setting will be shared with command-line tools -OPENSEARCH_PATH_CONF=${path.conf} +OPENSEARCH_PATH_CONF=/etc/opensearch # OpenSearch PID directory -#PID_DIR=/var/run/opensearch +PID_DIR=/var/run/opensearch # Additional Java OPTS #OPENSEARCH_JAVA_OPTS= @@ -25,11 +25,12 @@ OPENSEARCH_PATH_CONF=${path.conf} # OpenSearch service ################################ -# SysV init.d -# # The number of seconds to wait before checking if OpenSearch started successfully as a daemon process OPENSEARCH_STARTUP_SLEEP_TIME=5 +# Notification for systemd +OPENSEARCH_SD_NOTIFY=true + ################################ # System properties ################################ @@ -49,4 +50,4 @@ OPENSEARCH_STARTUP_SLEEP_TIME=5 # Maximum number of VMA (Virtual Memory Areas) a process can own # When using Systemd, this setting is ignored and the 'vm.max_map_count' # property is set at boot time in /usr/lib/sysctl.d/opensearch.conf -#MAX_MAP_COUNT=262144 +#MAX_MAP_COUNT=262144 \ No newline at end of file diff --git a/distribution/packages/src/common/systemd/opensearch.service b/distribution/packages/src/common/systemd/opensearch.service index 962dc5d2aae72..74870a35cd097 100644 --- a/distribution/packages/src/common/systemd/opensearch.service +++ b/distribution/packages/src/common/systemd/opensearch.service @@ -1,6 +1,16 @@ +# Copyright OpenSearch Contributors +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. 
+ +# Description: +# Default opensearch.service file + [Unit] Description=OpenSearch -Documentation=https://www.elastic.co +Documentation=https://opensearch.org/ Wants=network-online.target After=network-online.target @@ -8,11 +18,8 @@ After=network-online.target Type=notify RuntimeDirectory=opensearch PrivateTmp=true -Environment=OPENSEARCH_HOME=/usr/share/opensearch -Environment=OPENSEARCH_PATH_CONF=${path.conf} -Environment=PID_DIR=/var/run/opensearch -Environment=OPENSEARCH_SD_NOTIFY=true -EnvironmentFile=-${path.env} +EnvironmentFile=-/etc/default/opensearch +EnvironmentFile=-/etc/sysconfig/opensearch WorkingDirectory=/usr/share/opensearch @@ -29,6 +36,7 @@ ExecStart=/usr/share/opensearch/bin/systemd-entrypoint -p ${PID_DIR}/opensearch. # logging, you can simply remove the "quiet" option from ExecStart. StandardOutput=journal StandardError=inherit +SyslogIdentifier=opensearch # Specifies the maximum file descriptor number that can be opened by this process LimitNOFILE=65535 @@ -60,6 +68,97 @@ SuccessExitStatus=143 # Allow a slow startup before the systemd notifier module kicks in to extend the timeout TimeoutStartSec=75 +# Prevent modifications to the control group filesystem +ProtectControlGroups=true + +# Prevent loading or reading kernel modules +ProtectKernelModules=true + +# Prevent altering kernel tunables (sysctl parameters) +ProtectKernelTunables=true + +# Set device access policy to 'closed', allowing access only to specific devices +DevicePolicy=closed + +# Make /proc invisible to the service, enhancing isolation +ProtectProc=invisible + +# Make /usr, /boot, and /etc read-only (less restrictive than 'strict') +ProtectSystem=full + +# Prevent changes to control groups (redundant with earlier setting, can be removed) +ProtectControlGroups=yes + +# Prevent changing the execution domain +LockPersonality=yes + + +# System call filtering +# System call filtering restricts which system calls a process can make +# @ means allowed +# ~ means not allowed +SystemCallFilter=@system-service +SystemCallFilter=~@reboot +SystemCallFilter=~@swap + +SystemCallErrorNumber=EPERM + +# Capability restrictions +# Remove the ability to block system suspends +CapabilityBoundingSet=~CAP_BLOCK_SUSPEND + +# Remove the ability to establish leases on files +CapabilityBoundingSet=~CAP_LEASE + +# Remove the ability to use system resource accounting +CapabilityBoundingSet=~CAP_SYS_PACCT + +# Remove the ability to configure TTY devices +CapabilityBoundingSet=~CAP_SYS_TTY_CONFIG + +# Remove below capabilities: +# - CAP_SYS_ADMIN: Various system administration operations +# - CAP_SYS_PTRACE: Ability to trace processes +# - CAP_NET_ADMIN: Various network-related operations +CapabilityBoundingSet=~CAP_SYS_ADMIN ~CAP_SYS_PTRACE ~CAP_NET_ADMIN + + +# Address family restrictions +RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX + +# Filesystem Access + +ReadWritePaths=/var/log/opensearch +ReadWritePaths=/var/lib/opensearch +ReadWritePaths=-/etc/opensearch +ReadWritePaths=-/mnt/snapshots + +## Allow read access to system files +ReadOnlyPaths=/etc/os-release /usr/lib/os-release /etc/system-release + +## Allow read access to Linux IO stats +ReadOnlyPaths=/proc/self/mountinfo /proc/diskstats + +## Allow read access to control group stats +ReadOnlyPaths=/proc/self/cgroup /sys/fs/cgroup/cpu /sys/fs/cgroup/cpu/- +ReadOnlyPaths=/sys/fs/cgroup/cpuacct /sys/fs/cgroup/cpuacct/- /sys/fs/cgroup/memory /sys/fs/cgroup/memory/- + + +RestrictNamespaces=true + +NoNewPrivileges=true + +# Memory and execution protection
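+# (the sandboxing directives below are documented in systemd.exec(5))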
+# Prevent creating writable executable memory mappings +MemoryDenyWriteExecute=true +# Allow only native system calls +SystemCallArchitectures=native +# Service does not share key material with other services +KeyringMode=private +# Prevent changing ABI personality +LockPersonality=true +# Prevent creating SUID/SGID files +RestrictSUIDSGID=true +# Prevent acquiring realtime scheduling +RestrictRealtime=true +# Prevent changes to system hostname +ProtectHostname=true +# Prevent reading/writing kernel logs +ProtectKernelLogs=true +# Prevent tampering with the system clock +ProtectClock=true + [Install] WantedBy=multi-user.target From 5666982ac659794f73eda6aa07b0044a3bf3c3eb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Feb 2025 07:45:56 -0500 Subject: [PATCH 16/48] Bump com.google.api.grpc:proto-google-common-protos from 2.37.1 to 2.52.0 in /plugins/repository-gcs (#17379) * Bump com.google.api.grpc:proto-google-common-protos Bumps [com.google.api.grpc:proto-google-common-protos](https://github.com/googleapis/sdk-platform-java) from 2.37.1 to 2.52.0. - [Release notes](https://github.com/googleapis/sdk-platform-java/releases) - [Changelog](https://github.com/googleapis/sdk-platform-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/sdk-platform-java/compare/api-common/v2.37.1...v2.52.0) --- updated-dependencies: - dependency-name: com.google.api.grpc:proto-google-common-protos dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: gaobinlong Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: gaobinlong --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 2 +- .../licenses/proto-google-common-protos-2.37.1.jar.sha1 | 1 - .../licenses/proto-google-common-protos-2.52.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-2.52.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index ab4138c452894..b4d0432b7d3db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump netty from 4.1.117.Final to 4.1.118.Final ([#17320](https://github.com/opensearch-project/OpenSearch/pull/17320)) - Bump `reactor_netty` from 1.1.26 to 1.1.27 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322)) - Bump `me.champeau.gradle.japicmp` from 0.4.5 to 0.4.6 ([#17375](https://github.com/opensearch-project/OpenSearch/pull/17375)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.37.1 to 2.52.0 ([#17379](https://github.com/opensearch-project/OpenSearch/pull/17379)) - Bump `net.minidev:json-smart` from 2.5.1 to 2.5.2 ([#17378](https://github.com/opensearch-project/OpenSearch/pull/17378)) ### Changed diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index d4c870e1ca2b2..53439c1ca7744 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -56,7 +56,7 @@ dependencies { api 'com.google.api-client:google-api-client:2.7.0' - api 'com.google.api.grpc:proto-google-common-protos:2.37.1' + api
'com.google.api.grpc:proto-google-common-protos:2.52.0' api 'com.google.api.grpc:proto-google-iam-v1:1.33.0' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 deleted file mode 100644 index 92f991778ccc3..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3b8759ef0468cced72f8f0d4fc3cc57aeb8139f8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.52.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.52.0.jar.sha1 new file mode 100644 index 0000000000000..d955f83389a2d --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.52.0.jar.sha1 @@ -0,0 +1 @@ +8f64c0540ed74ca464a4a025b32f967bd764bdbe \ No newline at end of file From 4648c3ff6b6bb0021f1231e22bd8babf39b50607 Mon Sep 17 00:00:00 2001 From: Kaushal Kumar Date: Mon, 24 Feb 2025 10:07:40 -0800 Subject: [PATCH 17/48] [Rule based autotagging] Add attribute value store (#17342) * [rule based autotagging] add attribute value store Signed-off-by: Kaushal Kumar * add CHANGELOG entry Signed-off-by: Kaushal Kumar * make the store interface generic Signed-off-by: Kaushal Kumar * run spotless apply Signed-off-by: Kaushal Kumar * add missing javadoc Signed-off-by: Kaushal Kumar * improve javadoc for attribute value store Signed-off-by: Kaushal Kumar * improve binary search bisecting expression Signed-off-by: Kaushal Kumar * add licenses directory Signed-off-by: Kaushal Kumar --------- Signed-off-by: Kaushal Kumar --- CHANGELOG.md | 1 + plugins/workload-management/build.gradle | 5 + .../licenses/commons-collections-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/commons-collections-NOTICE.txt | 9 + .../commons-collections4-4.4.jar.sha1 | 1 + .../plugin/wlm/rule/package-info.java | 13 ++ .../wlm/rule/storage/AttributeValueStore.java | 47 ++++ .../storage/DefaultAttributeValueStore.java | 101 +++++++++ .../plugin/wlm/rule/storage/package-info.java | 12 ++ .../storage/AttributeValueStoreTests.java | 53 +++++ 10 files changed, 444 insertions(+) create mode 100644 plugins/workload-management/licenses/commons-collections-LICENSE.txt create mode 100644 plugins/workload-management/licenses/commons-collections-NOTICE.txt create mode 100644 plugins/workload-management/licenses/commons-collections4-4.4.jar.sha1 create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/package-info.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStore.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/DefaultAttributeValueStore.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/package-info.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStoreTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index b4d0432b7d3db..428cb6a8073d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/)) - Added 
ability to retrieve value from DocValues in a flat_object field([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802)) - Improve performance of NumericTermAggregation by avoiding unnecessary sorting([#17252](https://github.com/opensearch-project/OpenSearch/pull/17252)) +- [Rule Based Auto-tagging] Add in-memory attribute value store ([#17342](https://github.com/opensearch-project/OpenSearch/pull/17342)) ### Dependencies - Bump `org.awaitility:awaitility` from 4.2.0 to 4.2.2 ([#17230](https://github.com/opensearch-project/OpenSearch/pull/17230)) diff --git a/plugins/workload-management/build.gradle b/plugins/workload-management/build.gradle index 2e8b0df468092..c73c63e84ed1f 100644 --- a/plugins/workload-management/build.gradle +++ b/plugins/workload-management/build.gradle @@ -19,4 +19,9 @@ opensearchplugin { } dependencies { + api 'org.apache.commons:commons-collections4:4.4' +} + +tasks.named("dependencyLicenses").configure { + mapping from: /commons-collections.*/, to: 'commons-collections' } diff --git a/plugins/workload-management/licenses/commons-collections-LICENSE.txt b/plugins/workload-management/licenses/commons-collections-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/workload-management/licenses/commons-collections-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/workload-management/licenses/commons-collections-NOTICE.txt b/plugins/workload-management/licenses/commons-collections-NOTICE.txt new file mode 100644 index 0000000000000..79e9484bd56a0 --- /dev/null +++ b/plugins/workload-management/licenses/commons-collections-NOTICE.txt @@ -0,0 +1,9 @@ +Apache Commons Collections +Copyright 2001-2025 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (https://www.apache.org/). + +The Java source file src/main/java/org/apache/commons/collections4/map/ConcurrentReferenceHashMap.java +is from https://github.com/hazelcast/hazelcast and the following notice applies: +Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved. 
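For orientation before the next files: the commons-collections4 dependency added above is pulled in for its PatriciaTrie, which backs the attribute value store introduced below. A minimal, self-contained sketch of the trie behavior the store relies on (PatriciaTrie and prefixMap are the real commons-collections4 API; the sample keys and values are illustrative):

import org.apache.commons.collections4.trie.PatriciaTrie;

import java.util.SortedMap;

public class PatriciaTrieDemo {
    public static void main(String[] args) {
        PatriciaTrie<String> trie = new PatriciaTrie<>();
        trie.put("logs-", "rule-a");
        trie.put("logs-prod-", "rule-b");
        // prefixMap returns a sorted view of every entry whose key starts with the given prefix
        SortedMap<String, String> matches = trie.prefixMap("logs-");
        System.out.println(matches.keySet()); // prints [logs-, logs-prod-]
    }
}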
diff --git a/plugins/workload-management/licenses/commons-collections4-4.4.jar.sha1 b/plugins/workload-management/licenses/commons-collections4-4.4.jar.sha1 new file mode 100644 index 0000000000000..6b4ed5ab62b44 --- /dev/null +++ b/plugins/workload-management/licenses/commons-collections4-4.4.jar.sha1 @@ -0,0 +1 @@ +62ebe7544cb7164d87e0637a2a6a2bdc981395e8 \ No newline at end of file diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/package-info.java new file mode 100644 index 0000000000000..85c0562dae5ee --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.rule; +/** + * This package holds constructs for the Rule's in-memory storage and processing, and for syncing the in-memory view + * with the index view. + */ diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStore.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStore.java new file mode 100644 index 0000000000000..eb2ce8e4764ea --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStore.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.rule.storage; + +import java.util.Optional; + +/** + * This interface provides APIs to store Rule attribute values + */ +public interface AttributeValueStore<K, V> { + /** + * Adds the value to the attribute value store + * @param key to be added + * @param value to be added + */ + void put(K key, V value); + + /** + * Removes the key and associated value from the attribute value store + * @param key to be removed + */ + void remove(K key); + + /** + * Returns the value associated with the key + * @param key in the data structure + * @return the value associated with the key, if present + */ + Optional<V> get(K key); + + /** + * Clears all the keys and their associated values from the attribute value store + */ + void clear(); + + /** + * Returns the number of values stored + * @return count of key,value pairs in the store + */ + int size(); +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/DefaultAttributeValueStore.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/DefaultAttributeValueStore.java new file mode 100644 index 0000000000000..8b4c063f7ad1a --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/DefaultAttributeValueStore.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.rule.storage; + +import org.apache.commons.collections4.trie.PatriciaTrie; + +import java.util.Map; +import java.util.Optional; + +/** + * This is a Patricia trie based implementation of AttributeValueStore. A Patricia trie is chosen because it provides + * very fast search operations for prefix matches as well as range lookups, and stores strings efficiently. + * ref: https://commons.apache.org/proper/commons-collections/javadocs/api-4.4/org/apache/commons/collections4/trie/PatriciaTrie.html + */ +public class DefaultAttributeValueStore<K extends String, V> implements AttributeValueStore<K, V> { + PatriciaTrie<V> trie; + + /** + * Default constructor + */ + public DefaultAttributeValueStore() { + this(new PatriciaTrie<>()); + } + + /** + * Main constructor + * @param trie A Patricia Trie + */ + public DefaultAttributeValueStore(PatriciaTrie<V> trie) { + this.trie = trie; + } + + @Override + public void put(K key, V value) { + trie.put(key, value); + } + + @Override + public void remove(String key) { + trie.remove(key); + } + + @Override + public Optional<V> get(String key) { + /* + * Since we insert prefixes into the trie and search with longer strings, we need to find the + * longest matching prefix key in the trie efficiently; a binary search over prefix lengths does this. + */ + final String longestMatchingPrefix = findLongestMatchingPrefix(key); + + /* + * A match exists only if some Rule has a stored attribute value that is an actual prefix of the + * search key; in that case return that Rule's label, otherwise return empty. + */ + for (Map.Entry<String, V> possibleMatch : trie.prefixMap(longestMatchingPrefix).entrySet()) { + if (key.startsWith(possibleMatch.getKey())) { + return Optional.of(possibleMatch.getValue()); + } + } + + return Optional.empty(); + } + + private String findLongestMatchingPrefix(String key) { + int low = 0; + int high = key.length() - 1; + + while (low < high) { + int mid = (high + low + 1) / 2; + /* + * This check is cheap because prefixMap returns a lazy view over the trie rather than copying entries. + */ + if (!trie.prefixMap(key.substring(0, mid)).isEmpty()) { + low = mid; + } else { + high = mid - 1; + } + } + + return key.substring(0, low); + } + + @Override + public void clear() { + trie.clear(); + } + + @Override + public int size() { + return trie.size(); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/package-info.java new file mode 100644 index 0000000000000..6aa721ce22a00 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
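A usage sketch of the longest-matching-prefix semantics implemented in get() above (the store class is the one from this patch; the keys and labels are made up):

DefaultAttributeValueStore<String, String> store = new DefaultAttributeValueStore<>();
store.put("logs-", "rule-a");        // applies to any attribute value starting with "logs-"
store.put("logs-prod-", "rule-b");   // a more specific prefix
store.get("logs-prod-2025");         // Optional[rule-b]: the longest stored prefix wins
store.get("metrics-cpu");            // Optional.empty(): no stored prefix matches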
+ */ + +/** + * This package contains interfaces and implementations for in-memory rule storage mechanisms + */ +package org.opensearch.plugin.wlm.rule.storage; diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStoreTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStoreTests.java new file mode 100644 index 0000000000000..29c42e51efeb0 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStoreTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.rule.storage; + +import org.apache.commons.collections4.trie.PatriciaTrie; +import org.opensearch.test.OpenSearchTestCase; + +public class AttributeValueStoreTests extends OpenSearchTestCase { + + AttributeValueStore<String, String> subjectUnderTest; + + public void setUp() throws Exception { + super.setUp(); + subjectUnderTest = new DefaultAttributeValueStore<>(new PatriciaTrie<>()); + } + + public void testPut() { + subjectUnderTest.put("foo", "bar"); + assertEquals("bar", subjectUnderTest.get("foo").get()); + } + + public void testRemove() { + subjectUnderTest.put("foo", "bar"); + subjectUnderTest.remove("foo"); + assertEquals(0, subjectUnderTest.size()); + } + + public void testGet() { + subjectUnderTest.put("foo", "bar"); + assertEquals("bar", subjectUnderTest.get("foo").get()); + } + + public void testGetWhenNoProperPrefixIsPresent() { + subjectUnderTest.put("foo", "bar"); + subjectUnderTest.put("foodip", "sing"); + assertTrue(subjectUnderTest.get("foxtail").isEmpty()); + subjectUnderTest.put("fox", "lucy"); + + assertFalse(subjectUnderTest.get("foxtail").isEmpty()); + } + + public void testClear() { + subjectUnderTest.put("foo", "bar"); + subjectUnderTest.clear(); + assertEquals(0, subjectUnderTest.size()); + } +} From 0714a1b753d03cce1f684e6f68e93a430b2e1261 Mon Sep 17 00:00:00 2001 From: Xu Xiong Date: Mon, 24 Feb 2025 14:36:41 -0800 Subject: [PATCH 18/48] [Pull-based Ingestion] Offset management, support rewind by offset or timestamp (#17354) * initial commit Signed-off-by: xuxiong1 * add tests Signed-off-by: xuxiong1 * resolve comments Signed-off-by: xuxiong1 * support optional auto offset config Signed-off-by: xuxiong1 * Update DefaultStreamPollerTests with countDownLatch Signed-off-by: xuxiong1 * use long as timestamp type Signed-off-by: xuxiong1 * add change log Signed-off-by: xuxiong1 --------- Signed-off-by: xuxiong1 --- CHANGELOG-3.0.md | 1 + .../plugin/kafka/IngestFromKafkaIT.java | 69 +++++++++++++++- .../plugin/kafka/KafkaPartitionConsumer.java | 46 +++++++++++ .../plugin/kafka/KafkaSourceConfig.java | 13 +++ .../kafka/KafkaPartitionConsumerTests.java | 16 ++++ .../cluster/metadata/IndexMetadata.java | 56 +++++++++++-- .../cluster/metadata/IngestionSource.java | 47 ++++++++++- .../common/settings/IndexScopedSettings.java | 1 + .../index/IngestionShardConsumer.java | 16 ++++ .../index/engine/IngestionEngine.java | 8 +- .../pollingingest/DefaultStreamPoller.java | 23 +++++- .../indices/pollingingest/StreamPoller.java | 4 + .../metadata/IngestionSourceTests.java | 28 ++++--- .../index/engine/FakeIngestionSource.java | 10 +++ .../DefaultStreamPollerTests.java | 82 ++++++++++++++++--- 15 files changed, 380 insertions(+), 40 deletions(-) diff --git
a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 9bb8d528a6efb..e0ac2c3ecd80d 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -19,6 +19,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Added ConfigurationUtils to core for the ease of configuration parsing [#17223](https://github.com/opensearch-project/OpenSearch/pull/17223) - Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) - Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) +- Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java index e7d8e36acb302..d6b099c6b24d8 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java @@ -102,6 +102,69 @@ public void testKafkaIngestion() { } } + public void testKafkaIngestion_RewindByTimeStamp() { + try { + setupKafka(); + // create an index with ingestion source from kafka + createIndex( + "test_rewind_by_timestamp", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "rewind_by_timestamp") + // 1739459500000 is the timestamp of the first message + // 1739459800000 is the timestamp of the second message + // by resetting to 1739459600000, only the second message will be ingested + .put("ingestion_source.pointer.init.reset.value", "1739459600000") + .put("ingestion_source.param.topic", "test") + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("ingestion_source.param.auto.offset.reset", "latest") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); + + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); + await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + refresh("test_rewind_by_timestamp"); + SearchResponse response = client().prepareSearch("test_rewind_by_timestamp").setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + }); + } finally { + stopKafka(); + } + } + + public void testKafkaIngestion_RewindByOffset() { + try { + setupKafka(); + // create an index with ingestion source from kafka + createIndex( + "test_rewind_by_offset", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "rewind_by_offset") + .put("ingestion_source.pointer.init.reset.value", "1") + .put("ingestion_source.param.topic", "test") + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("ingestion_source.param.auto.offset.reset", "latest") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); 
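+            // Note: rewind_by_timestamp (tested above) is resolved through the standard Kafka
+            // consumer API, roughly as sketched here (partition and timestamp are illustrative):
+            //   Map<TopicPartition, OffsetAndTimestamp> res =
+            //       consumer.offsetsForTimes(Collections.singletonMap(partition, 1739459600000L));
+            //   OffsetAndTimestamp oat = res.get(partition);
+            //   long offset = (oat == null) ? -1L : oat.offset(); // -1L falls back to auto.offset.reset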
+ + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); + await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + refresh("test_rewind_by_offset"); + SearchResponse response = client().prepareSearch("test_rewind_by_offset").setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + }); + } finally { + stopKafka(); + } + } + private void setupKafka() { kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:6.2.1")) // disable topic auto creation @@ -122,10 +185,14 @@ private void prepareKafkaData() { Properties props = new Properties(); props.put("bootstrap.servers", kafka.getBootstrapServers()); Producer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); - producer.send(new ProducerRecord<>(topicName, "null", "{\"_id\":\"1\",\"_source\":{\"name\":\"bob\", \"age\": 24}}")); + producer.send( + new ProducerRecord<>(topicName, null, 1739459500000L, "null", "{\"_id\":\"1\",\"_source\":{\"name\":\"bob\", \"age\": 24}}") + ); producer.send( new ProducerRecord<>( topicName, + null, + 1739459800000L, "null", "{\"_id\":\"2\", \"_op_type:\":\"index\",\"_source\":{\"name\":\"alice\", \"age\": 20}}" ) diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java index a20e52a06eecd..9461cfbc2de98 100644 --- a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java +++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java @@ -9,9 +9,12 @@ package org.opensearch.plugin.kafka; import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.consumer.OffsetAndTimestamp; +import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.serialization.ByteArrayDeserializer; @@ -27,6 +30,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Properties; import java.util.concurrent.TimeoutException; @@ -47,6 +51,7 @@ public class KafkaPartitionConsumer implements IngestionShardConsumer consumer) { this.clientId = clientId; this.consumer = consumer; + this.config = config; String topic = config.getTopic(); List partitionInfos = AccessController.doPrivileged( (PrivilegedAction>) () -> consumer.partitionsFor(topic, Duration.ofMillis(timeoutMillis)) @@ -93,6 +99,9 @@ protected static Consumer createConsumer(String clientId, KafkaS Properties consumerProp = new Properties(); consumerProp.put("bootstrap.servers", config.getBootstrapServers()); consumerProp.put("client.id", clientId); + if (config.getAutoOffsetResetConfig() != null && !config.getAutoOffsetResetConfig().isEmpty()) { + consumerProp.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, config.getAutoOffsetResetConfig()); + } // TODO: why Class org.apache.kafka.common.serialization.StringDeserializer could not be found if set the deserializer as prop? 
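+        // (A likely reading of the TODO above: the plugin hands deserializer instances directly to the
+        // consumer constructor, e.g. new KafkaConsumer<>(consumerProp, new ByteArrayDeserializer(),
+        // new ByteArrayDeserializer()), so the class name never needs to be resolved from properties
+        // inside the plugin's classloader; the ByteArrayDeserializer import added above is consistent with this.)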
// consumerProp.put("key.deserializer", // "org.apache.kafka.common.serialization.StringDeserializer"); @@ -140,6 +149,43 @@ public IngestionShardPointer latestPointer() { return new KafkaOffset(endOffset); } + @Override + public IngestionShardPointer pointerFromTimestampMillis(long timestampMillis) { + long offset = AccessController.doPrivileged((PrivilegedAction) () -> { + Map position = consumer.offsetsForTimes( + Collections.singletonMap(topicPartition, timestampMillis) + ); + if (position == null || position.isEmpty()) { + return -1L; + } + OffsetAndTimestamp offsetAndTimestamp = position.values().iterator().next(); + if (offsetAndTimestamp == null) { + return -1L; + } + return offsetAndTimestamp.offset(); + }); + if (offset < 0) { + logger.warn("No message found for timestamp {}, fall back to auto.offset.reset policy", timestampMillis); + String autoOffsetResetConfig = config.getAutoOffsetResetConfig(); + if (OffsetResetStrategy.EARLIEST.toString().equals(autoOffsetResetConfig)) { + logger.warn("The auto.offset.reset is set to earliest, seek to earliest pointer"); + return earliestPointer(); + } else if (OffsetResetStrategy.LATEST.toString().equals(autoOffsetResetConfig)) { + logger.warn("The auto.offset.reset is set to latest, seek to latest pointer"); + return latestPointer(); + } else { + throw new IllegalArgumentException("No message found for timestamp " + timestampMillis); + } + } + return new KafkaOffset(offset); + } + + @Override + public IngestionShardPointer pointerFromOffset(String offset) { + long offsetValue = Long.parseLong(offset); + return new KafkaOffset(offsetValue); + } + private synchronized List> fetch(long startOffset, long maxMessages, int timeoutMillis) { if (lastFetchedOffset < 0 || lastFetchedOffset != startOffset - 1) { logger.info("Seeking to offset {}", startOffset); diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java index 722883d353ebf..cbb8530963ec8 100644 --- a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java +++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java @@ -18,9 +18,12 @@ public class KafkaSourceConfig { private final String PROP_TOPIC = "topic"; private final String PROP_BOOTSTRAP_SERVERS = "bootstrap_servers"; + // TODO: support pass any generic kafka configs + private final String PROP_AUTO_OFFSET_RESET = "auto.offset.reset"; private final String topic; private final String bootstrapServers; + private final String autoOffsetResetConfig; /** * Constructor @@ -29,6 +32,7 @@ public class KafkaSourceConfig { public KafkaSourceConfig(Map params) { this.topic = ConfigurationUtils.readStringProperty(params, PROP_TOPIC); this.bootstrapServers = ConfigurationUtils.readStringProperty(params, PROP_BOOTSTRAP_SERVERS); + this.autoOffsetResetConfig = ConfigurationUtils.readOptionalStringProperty(params, PROP_AUTO_OFFSET_RESET); } /** @@ -47,4 +51,13 @@ public String getTopic() { public String getBootstrapServers() { return bootstrapServers; } + + /** + * Get the auto offset reset configuration + * + * @return the auto offset reset configuration + */ + public String getAutoOffsetResetConfig() { + return autoOffsetResetConfig; + } } diff --git a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java 
b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java index 96f639366d887..d1d9ad4fbf8ae 100644 --- a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java +++ b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java @@ -90,6 +90,22 @@ public void testLatestPointer() { assertEquals(10L, offset.getOffset()); } + public void testPointerFromTimestampMillis() { + TopicPartition topicPartition = new TopicPartition("test-topic", 0); + when(mockConsumer.offsetsForTimes(Collections.singletonMap(topicPartition, 1000L))).thenReturn( + Collections.singletonMap(topicPartition, new org.apache.kafka.clients.consumer.OffsetAndTimestamp(5L, 1000L)) + ); + + KafkaOffset offset = (KafkaOffset) consumer.pointerFromTimestampMillis(1000); + + assertEquals(5L, offset.getOffset()); + } + + public void testPointerFromOffset() { + KafkaOffset offset = new KafkaOffset(5L); + assertEquals(5L, offset.getOffset()); + } + public void testTopicDoesNotExist() { Map params = new HashMap<>(); params.put("topic", "non-existent-topic"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index d50192f106cfe..d4fcadc4ac56d 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -716,8 +716,7 @@ public void validate(final String value, final Map, Object> settings) @Override public void validate(final String value) { - if (!(value.equalsIgnoreCase(StreamPoller.ResetState.LATEST.name()) - || value.equalsIgnoreCase(StreamPoller.ResetState.EARLIEST.name()))) { + if (!isValidResetState(value)) { throw new IllegalArgumentException( "Invalid value for " + SETTING_INGESTION_SOURCE_POINTER_INIT_RESET + " [" + value + "]" ); @@ -725,10 +724,50 @@ public void validate(final String value) { } @Override - public void validate(final String value, final Map, Object> settings) {} + public void validate(final String value, final Map, Object> settings) { + if (isRewindState(value)) { + // Ensure the reset value setting is provided when rewinding. + final String resetValue = (String) settings.get(INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING); + if (resetValue == null || resetValue.isEmpty()) { + throw new IllegalArgumentException( + "Setting " + + INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING.getKey() + + " should be set when REWIND_BY_OFFSET or REWIND_BY_TIMESTAMP" + ); + } + } + } + + private boolean isValidResetState(String value) { + return StreamPoller.ResetState.LATEST.name().equalsIgnoreCase(value) + || StreamPoller.ResetState.EARLIEST.name().equalsIgnoreCase(value) + || isRewindState(value); + } + + private boolean isRewindState(String value) { + return StreamPoller.ResetState.REWIND_BY_OFFSET.name().equalsIgnoreCase(value) + || StreamPoller.ResetState.REWIND_BY_TIMESTAMP.name().equalsIgnoreCase(value); + } + + @Override + public Iterator> settings() { + final List> settings = Collections.singletonList(INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING); + return settings.iterator(); + } }, Property.IndexScope, - Property.Dynamic + Property.Final + ); + + /** + * Defines the setting for the value to be used when resetting by offset or timestamp. 
+ */ + public static final String SETTING_INGESTION_SOURCE_POINTER_INIT_RESET_VALUE = "index.ingestion_source.pointer.init.reset.value"; + public static final Setting INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING = Setting.simpleString( + SETTING_INGESTION_SOURCE_POINTER_INIT_RESET_VALUE, + "", + Property.IndexScope, + Property.Final ); public static final Setting.AffixSetting INGESTION_SOURCE_PARAMS_SETTING = Setting.prefixKeySetting( @@ -954,7 +993,14 @@ public Version getCreationVersion() { public IngestionSource getIngestionSource() { final String ingestionSourceType = INGESTION_SOURCE_TYPE_SETTING.get(settings); if (ingestionSourceType != null && !(NONE_INGESTION_SOURCE_TYPE.equals(ingestionSourceType))) { - final String pointerInitReset = INGESTION_SOURCE_POINTER_INIT_RESET_SETTING.get(settings); + final StreamPoller.ResetState pointerInitResetType = StreamPoller.ResetState.valueOf( + INGESTION_SOURCE_POINTER_INIT_RESET_SETTING.get(settings).toUpperCase(Locale.ROOT) + ); + final String pointerInitResetValue = INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING.get(settings); + IngestionSource.PointerInitReset pointerInitReset = new IngestionSource.PointerInitReset( + pointerInitResetType, + pointerInitResetValue + ); final Map ingestionSourceParams = INGESTION_SOURCE_PARAMS_SETTING.getAsMap(settings); return new IngestionSource(ingestionSourceType, pointerInitReset, ingestionSourceParams); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java index 583114d9ecbd2..9849c0a5f2ba9 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java @@ -9,6 +9,7 @@ package org.opensearch.cluster.metadata; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.indices.pollingingest.StreamPoller; import java.util.Map; import java.util.Objects; @@ -19,10 +20,10 @@ @ExperimentalApi public class IngestionSource { private String type; - private String pointerInitReset; + private PointerInitReset pointerInitReset; private Map params; - public IngestionSource(String type, String pointerInitReset, Map params) { + public IngestionSource(String type, PointerInitReset pointerInitReset, Map params) { this.type = type; this.pointerInitReset = pointerInitReset; this.params = params; @@ -32,7 +33,7 @@ public String getType() { return type; } - public String getPointerInitReset() { + public PointerInitReset getPointerInitReset() { return pointerInitReset; } @@ -59,4 +60,44 @@ public int hashCode() { public String toString() { return "IngestionSource{" + "type='" + type + '\'' + ",pointer_init_reset='" + pointerInitReset + '\'' + ", params=" + params + '}'; } + + /** + * Class encapsulating the configuration of a pointer initialization. 
+ */ + @ExperimentalApi + public static class PointerInitReset { + private final StreamPoller.ResetState type; + private final String value; + + public PointerInitReset(StreamPoller.ResetState type, String value) { + this.type = type; + this.value = value; + } + + public StreamPoller.ResetState getType() { + return type; + } + + public String getValue() { + return value; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PointerInitReset pointerInitReset = (PointerInitReset) o; + return Objects.equals(type, pointerInitReset.type) && Objects.equals(value, pointerInitReset.value); + } + + @Override + public int hashCode() { + return Objects.hash(type, value); + } + + @Override + public String toString() { + return "PointerInitReset{" + "type='" + type + '\'' + ", value=" + value + '}'; + } + } } diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 0e21104fb6426..946d7fe734deb 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -263,6 +263,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for ingestion source IndexMetadata.INGESTION_SOURCE_TYPE_SETTING, IndexMetadata.INGESTION_SOURCE_POINTER_INIT_RESET_SETTING, + IndexMetadata.INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING, IndexMetadata.INGESTION_SOURCE_PARAMS_SETTING, // validate that built-in similarities don't get redefined diff --git a/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java b/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java index 02a9f5a18ebb1..41e659196a612 100644 --- a/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java +++ b/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java @@ -82,6 +82,22 @@ public M getMessage() { */ IngestionShardPointer latestPointer(); + /** + * Returns an ingestion shard pointer based on the provided timestamp in milliseconds. + * + * @param timestampMillis the timestamp in milliseconds + * @return the ingestion shard pointer corresponding to the given timestamp + */ + IngestionShardPointer pointerFromTimestampMillis(long timestampMillis); + + /** + * Returns an ingestion shard pointer based on the provided offset. 
+ * + * @param offset the offset value + * @return the ingestion shard pointer corresponding to the given offset + */ + IngestionShardPointer pointerFromOffset(String offset); + /** * @return the shard id */ diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index 58c6371d51c0a..b37281b9d1582 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -65,7 +65,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -172,9 +171,7 @@ public void start() { logger.info("created ingestion consumer for shard [{}]", engineConfig.getShardId()); Map commitData = commitDataAsMap(); - StreamPoller.ResetState resetState = StreamPoller.ResetState.valueOf( - ingestionSource.getPointerInitReset().toUpperCase(Locale.ROOT) - ); + StreamPoller.ResetState resetState = ingestionSource.getPointerInitReset().getType(); IngestionShardPointer startPointer = null; Set persistedPointers = new HashSet<>(); if (commitData.containsKey(StreamPoller.BATCH_START)) { @@ -191,7 +188,8 @@ public void start() { resetState = StreamPoller.ResetState.NONE; } - streamPoller = new DefaultStreamPoller(startPointer, persistedPointers, ingestionShardConsumer, this, resetState); + String resetValue = ingestionSource.getPointerInitReset().getValue(); + streamPoller = new DefaultStreamPoller(startPointer, persistedPointers, ingestionShardConsumer, this, resetState, resetValue); streamPoller.start(); } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java index b5c1db999544a..884cffec4aad5 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java @@ -52,6 +52,7 @@ public class DefaultStreamPoller implements StreamPoller { private IngestionShardPointer batchStartPointer; private ResetState resetState; + private final String resetValue; private Set persistedPointers; @@ -68,14 +69,16 @@ public DefaultStreamPoller( Set persistedPointers, IngestionShardConsumer consumer, IngestionEngine ingestionEngine, - ResetState resetState + ResetState resetState, + String resetValue ) { this( startPointer, persistedPointers, consumer, new MessageProcessorRunnable(new ArrayBlockingQueue<>(100), ingestionEngine), - resetState + resetState, + resetValue ); } @@ -84,10 +87,12 @@ public DefaultStreamPoller( Set persistedPointers, IngestionShardConsumer consumer, MessageProcessorRunnable processorRunnable, - ResetState resetState + ResetState resetState, + String resetValue ) { this.consumer = Objects.requireNonNull(consumer); this.resetState = resetState; + this.resetValue = resetValue; batchStartPointer = startPointer; this.persistedPointers = persistedPointers; if (!this.persistedPointers.isEmpty()) { @@ -151,6 +156,18 @@ protected void startPoll() { batchStartPointer = consumer.latestPointer(); logger.info("Resetting offset by seeking to latest offset {}", batchStartPointer.asString()); break; + case REWIND_BY_OFFSET: + batchStartPointer = consumer.pointerFromOffset(resetValue); + logger.info("Resetting offset by seeking to offset {}", batchStartPointer.asString()); + break; + 
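+                    // Both REWIND_* branches depend on resetValue: the index-setting validator added
+                    // earlier in this patch rejects a rewind reset state when pointer.init.reset.value is empty.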
case REWIND_BY_TIMESTAMP: + batchStartPointer = consumer.pointerFromTimestampMillis(Long.parseLong(resetValue)); + logger.info( + "Resetting offset by seeking to timestamp {}, corresponding offset {}", + resetValue, + batchStartPointer.asString() + ); + break; } resetState = ResetState.NONE; } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java index f674f6dc55c85..5010982991ecc 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java @@ -8,6 +8,7 @@ package org.opensearch.indices.pollingingest; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.index.IngestionShardPointer; import java.io.Closeable; @@ -63,9 +64,12 @@ enum State { /** * a reset state to indicate how to reset the pointer */ + @ExperimentalApi enum ResetState { EARLIEST, LATEST, + REWIND_BY_OFFSET, + REWIND_BY_TIMESTAMP, NONE, } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java index f67d13e54e608..0afe67002517b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.metadata; +import org.opensearch.indices.pollingingest.StreamPoller; import org.opensearch.test.OpenSearchTestCase; import java.util.HashMap; @@ -15,53 +16,60 @@ public class IngestionSourceTests extends OpenSearchTestCase { + private final IngestionSource.PointerInitReset pointerInitReset = new IngestionSource.PointerInitReset( + StreamPoller.ResetState.REWIND_BY_OFFSET, + "1000" + ); + public void testConstructorAndGetters() { Map params = new HashMap<>(); params.put("key", "value"); - IngestionSource source = new IngestionSource("type", "pointerInitReset", params); + IngestionSource source = new IngestionSource("type", pointerInitReset, params); assertEquals("type", source.getType()); - assertEquals("pointerInitReset", source.getPointerInitReset()); + assertEquals(StreamPoller.ResetState.REWIND_BY_OFFSET, source.getPointerInitReset().getType()); + assertEquals("1000", source.getPointerInitReset().getValue()); assertEquals(params, source.params()); } public void testEquals() { Map params1 = new HashMap<>(); params1.put("key", "value"); - IngestionSource source1 = new IngestionSource("type", "pointerInitReset", params1); + IngestionSource source1 = new IngestionSource("type", pointerInitReset, params1); Map params2 = new HashMap<>(); params2.put("key", "value"); - IngestionSource source2 = new IngestionSource("type", "pointerInitReset", params2); + IngestionSource source2 = new IngestionSource("type", pointerInitReset, params2); assertTrue(source1.equals(source2)); assertTrue(source2.equals(source1)); - IngestionSource source3 = new IngestionSource("differentType", "pointerInitReset", params1); + IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, params1); assertFalse(source1.equals(source3)); } public void testHashCode() { Map params1 = new HashMap<>(); params1.put("key", "value"); - IngestionSource source1 = new IngestionSource("type", "pointerInitReset", params1); + IngestionSource source1 = new IngestionSource("type", pointerInitReset, params1); Map params2 = new HashMap<>(); 
params2.put("key", "value"); - IngestionSource source2 = new IngestionSource("type", "pointerInitReset", params2); + IngestionSource source2 = new IngestionSource("type", pointerInitReset, params2); assertEquals(source1.hashCode(), source2.hashCode()); - IngestionSource source3 = new IngestionSource("differentType", "pointerInitReset", params1); + IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, params1); assertNotEquals(source1.hashCode(), source3.hashCode()); } public void testToString() { Map params = new HashMap<>(); params.put("key", "value"); - IngestionSource source = new IngestionSource("type", "pointerInitReset", params); + IngestionSource source = new IngestionSource("type", pointerInitReset, params); - String expected = "IngestionSource{type='type',pointer_init_reset='pointerInitReset', params={key=value}}"; + String expected = + "IngestionSource{type='type',pointer_init_reset='PointerInitReset{type='REWIND_BY_OFFSET', value=1000}', params={key=value}}"; assertEquals(expected, source.toString()); } } diff --git a/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java b/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java index de03dcd313c29..1d81a22e94e9c 100644 --- a/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java +++ b/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java @@ -93,6 +93,16 @@ public FakeIngestionShardPointer latestPointer() { return new FakeIngestionShardPointer(messages.size()); } + @Override + public IngestionShardPointer pointerFromTimestampMillis(long timestampMillis) { + throw new UnsupportedOperationException("Not implemented yet."); + } + + @Override + public IngestionShardPointer pointerFromOffset(String offset) { + return new FakeIngestionShardPointer(Long.parseLong(offset)); + } + @Override public int getShardId() { return shardId; diff --git a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java index 1a98f65d04f7c..c17b11791af09 100644 --- a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java +++ b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java @@ -20,8 +20,11 @@ import java.util.List; import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -34,7 +37,7 @@ public class DefaultStreamPollerTests extends OpenSearchTestCase { private MessageProcessorRunnable.MessageProcessor processor; private List messages; private Set persistedPointers; - private final int sleepTime = 300; + private final int awaitTime = 300; @Before public void setUp() throws Exception { @@ -52,7 +55,8 @@ public void setUp() throws Exception { persistedPointers, fakeConsumer, processorRunnable, - StreamPoller.ResetState.NONE + StreamPoller.ResetState.NONE, + "" ); } @@ -65,16 +69,32 @@ public void tearDown() throws Exception { } public void testPauseAndResume() throws InterruptedException { + // We'll use a latch that counts the number of messages processed. 
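+        // (Pattern note: stubbing the mock so each process() call counts down a latch, then awaiting
+        // with a timeout, replaces the earlier fixed Thread.sleep and makes the assertion deterministic
+        // rather than timing-dependent.)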
+ CountDownLatch pauseLatch = new CountDownLatch(2); + doAnswer(invocation -> { + pauseLatch.countDown(); + return null; + }).when(processor).process(any(), any()); + poller.pause(); poller.start(); - Thread.sleep(sleepTime); // Allow some time for the poller to run + + // Wait briefly to ensure that no processing occurs. + boolean processedWhilePaused = pauseLatch.await(awaitTime, TimeUnit.MILLISECONDS); + // Expecting the latch NOT to reach zero because we are paused. + assertFalse("Messages should not be processed while paused", processedWhilePaused); assertEquals(DefaultStreamPoller.State.PAUSED, poller.getState()); assertTrue(poller.isPaused()); - // no messages are processed verify(processor, never()).process(any(), any()); + CountDownLatch resumeLatch = new CountDownLatch(2); + doAnswer(invocation -> { + resumeLatch.countDown(); + return null; + }).when(processor).process(any(), any()); + poller.resume(); - Thread.sleep(sleepTime); // Allow some time for the poller to run + resumeLatch.await(); assertFalse(poller.isPaused()); // 2 messages are processed verify(processor, times(2)).process(any(), any()); @@ -90,10 +110,18 @@ public void testSkipProcessed() throws InterruptedException { persistedPointers, fakeConsumer, processorRunnable, - StreamPoller.ResetState.NONE + StreamPoller.ResetState.NONE, + "" ); + + CountDownLatch latch = new CountDownLatch(2); + doAnswer(invocation -> { + latch.countDown(); + return null; + }).when(processor).process(any(), any()); + poller.start(); - Thread.sleep(sleepTime); // Allow some time for the poller to run + latch.await(); // 2 messages are processed, 2 messages are skipped verify(processor, times(2)).process(any(), any()); assertEquals(new FakeIngestionSource.FakeIngestionShardPointer(2), poller.getMaxPersistedPointer()); @@ -106,7 +134,7 @@ public void testCloseWithoutStart() { public void testClose() throws InterruptedException { poller.start(); - Thread.sleep(sleepTime); // Allow some time for the poller to run + waitUntil(() -> poller.getState() == DefaultStreamPoller.State.POLLING, awaitTime, TimeUnit.MILLISECONDS); poller.close(); assertTrue(poller.isClosed()); assertEquals(DefaultStreamPoller.State.CLOSED, poller.getState()); @@ -118,11 +146,17 @@ public void testResetStateEarliest() throws InterruptedException { persistedPointers, fakeConsumer, processorRunnable, - StreamPoller.ResetState.EARLIEST + StreamPoller.ResetState.EARLIEST, + "" ); + CountDownLatch latch = new CountDownLatch(2); + doAnswer(invocation -> { + latch.countDown(); + return null; + }).when(processor).process(any(), any()); poller.start(); - Thread.sleep(sleepTime); // Allow some time for the poller to run + latch.await(); // 2 messages are processed verify(processor, times(2)).process(any(), any()); @@ -134,17 +168,39 @@ public void testResetStateLatest() throws InterruptedException { persistedPointers, fakeConsumer, processorRunnable, - StreamPoller.ResetState.LATEST + StreamPoller.ResetState.LATEST, + "" ); poller.start(); - Thread.sleep(sleepTime); // Allow some time for the poller to run + waitUntil(() -> poller.getState() == DefaultStreamPoller.State.POLLING, awaitTime, TimeUnit.MILLISECONDS); // no messages processed verify(processor, never()).process(any(), any()); // reset to the latest assertEquals(new FakeIngestionSource.FakeIngestionShardPointer(2), poller.getBatchStartPointer()); } + public void testResetStateRewindByOffset() throws InterruptedException { + poller = new DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(2), + 
persistedPointers, + fakeConsumer, + processorRunnable, + StreamPoller.ResetState.REWIND_BY_OFFSET, + "1" + ); + CountDownLatch latch = new CountDownLatch(1); + doAnswer(invocation -> { + latch.countDown(); + return null; + }).when(processor).process(any(), any()); + + poller.start(); + latch.await(); + // 1 message is processed + verify(processor, times(1)).process(any(), any()); + } + public void testStartPollWithoutStart() { try { poller.startPoll(); @@ -156,7 +212,7 @@ public void testStartPollWithoutStart() { public void testStartClosedPoller() throws InterruptedException { poller.start(); - Thread.sleep(sleepTime); + waitUntil(() -> poller.getState() == DefaultStreamPoller.State.POLLING, awaitTime, TimeUnit.MILLISECONDS); poller.close(); try { poller.startPoll(); From bc209ee6bacbb1027dcd7ba28d56b6ceb96f4fe0 Mon Sep 17 00:00:00 2001 From: Divyansh Pandey <98746046+pandeydivyansh1803@users.noreply.github.com> Date: Tue, 25 Feb 2025 08:39:14 +0530 Subject: [PATCH 19/48] Add new index and cluster level settings to limit the total primary shards per node and per index (#17295) * Added a new index level setting to limit the total primary shards per index per node. Added relevant files for unit test and integration test. Signed-off-by: Divyansh Pandey * update files for code quality Signed-off-by: Divyansh Pandey * moved primary shard count function to RoutingNode.java Signed-off-by: Divyansh Pandey * removed unwanted files Signed-off-by: Divyansh Pandey * added cluster level setting to limit total primary shards per node Signed-off-by: Divyansh Pandey * allow the index level settings to be applied to both DOCUMENT and SEGMENT replication indices Signed-off-by: Divyansh Pandey * Added necessary validator to restrict the index and cluster level primary shards per node settings only for remote store enabled cluster. Added relevant unit and integration tests. 
Signed-off-by: Divyansh Pandey * refactoring changes Signed-off-by: Divyansh Pandey * refactoring changes Signed-off-by: Divyansh Pandey * Empty commit to rerun gradle test Signed-off-by: Divyansh Pandey * optimised the calculation of total primary shards on a node Signed-off-by: Divyansh Pandey * Refactoring changes Signed-off-by: Divyansh Pandey * refactoring changes, added TODO to MetadataCreateIndexService Signed-off-by: Divyansh Pandey * Added integration test for scenario where primary shards setting is set for cluster which is not remote store enabled Signed-off-by: Divyansh Pandey --------- Signed-off-by: Divyansh Pandey Signed-off-by: Divyansh Pandey <98746046+pandeydivyansh1803@users.noreply.github.com> Co-authored-by: Divyansh Pandey --- CHANGELOG-3.0.md | 1 + .../ShardsLimitAllocationDeciderIT.java | 305 +++++++++++++++ ...AllocationDeciderRemoteStoreEnabledIT.java | 248 +++++++++++++ .../TransportClusterUpdateSettingsAction.java | 32 ++ .../cluster/metadata/IndexMetadata.java | 11 + .../metadata/MetadataCreateIndexService.java | 24 ++ .../MetadataIndexTemplateService.java | 4 + .../MetadataUpdateSettingsService.java | 2 + .../cluster/routing/RoutingNode.java | 113 +++++- .../decider/ShardsLimitAllocationDecider.java | 91 ++++- .../common/settings/ClusterSettings.java | 1 + .../common/settings/IndexScopedSettings.java | 1 + .../MetadataCreateIndexServiceTests.java | 91 +++++ .../MetadataIndexTemplateServiceTests.java | 19 + .../cluster/routing/RoutingNodeTests.java | 55 +++ .../ShardsLimitAllocationDeciderTests.java | 349 ++++++++++++++++++ 16 files changed, 1320 insertions(+), 27 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderIT.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java create mode 100644 server/src/test/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderTests.java diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index e0ac2c3ecd80d..e4ae38e8da2ae 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add systemd configurations to strengthen OS core security ([#17107](https://github.com/opensearch-project/OpenSearch/pull/17107)) - Added pull-based Ingestion (APIs, for ingestion source, a Kafka plugin, and IngestionEngine that pulls data from the ingestion source) ([#16958](https://github.com/opensearch-project/OpenSearch/pull/16958)) - Added ConfigurationUtils to core for the ease of configuration parsing [#17223](https://github.com/opensearch-project/OpenSearch/pull/17223) +- Add cluster and index level settings to limit the total primary shards per node and per index [#17295](https://github.com/opensearch-project/OpenSearch/pull/17295) - Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) - Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) - Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderIT.java 
b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderIT.java new file mode 100644 index 0000000000000..fdc6a7e6b96b2 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderIT.java @@ -0,0 +1,305 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.allocation.decider; + +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 3) +public class ShardsLimitAllocationDeciderIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); + } + + public void testClusterWideShardsLimit() { + // Set the cluster-wide shard limit to 2 + updateClusterSetting(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 4); + + // Create the first two indices with 3 shards and 1 replica each + createIndex("test1", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 1).build()); + createIndex("test2", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 1).build()); + + // Create the third index with 2 shards and 1 replica + createIndex("test3", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put(SETTING_NUMBER_OF_REPLICAS, 1).build()); + + // Wait for the shard limit to be applied + try { + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards + assertEquals(16, state.getRoutingTable().allShards().size()); + + // Check number of unassigned shards + int unassignedShards = state.getRoutingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(); + assertEquals(4, unassignedShards); + + // Check shard distribution across nodes + for (RoutingNode routingNode : state.getRoutingNodes()) { + assertTrue("Node exceeds shard limit", routingNode.numberOfOwningShards() 
<= 4); + } + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + + // Additional assertions to verify shard distribution + ClusterState state = client().admin().cluster().prepareState().get().getState(); + int totalAssignedShards = 0; + for (RoutingNode routingNode : state.getRoutingNodes()) { + totalAssignedShards += routingNode.numberOfOwningShards(); + } + assertEquals("Total assigned shards should be 12", 12, totalAssignedShards); + + } + + public void testIndexSpecificShardLimit() { + // Set the index-specific shard limit to 2 for the first index only + Settings indexSettingsWithLimit = Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 4) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 2) + .build(); + + Settings indexSettingsWithoutLimit = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 4).put(SETTING_NUMBER_OF_REPLICAS, 1).build(); + + // Create the first index with 4 shards, 1 replica, and the index-specific limit + createIndex("test1", indexSettingsWithLimit); + + // Create the second index with 4 shards and 1 replica, without the index-specific limit + createIndex("test2", indexSettingsWithoutLimit); + + // Create the third index with 3 shards and 1 replica, without the index-specific limit + createIndex("test3", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 1).build()); + + try { + // Wait for the shard limit to be applied + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards + assertEquals(22, state.getRoutingTable().allShards().size()); + + // Check total number of assigned and unassigned shards + int totalAssignedShards = 0; + int totalUnassignedShards = 0; + Map unassignedShardsByIndex = new HashMap<>(); + + for (IndexRoutingTable indexRoutingTable : state.getRoutingTable()) { + String index = indexRoutingTable.getIndex().getName(); + int indexUnassignedShards = 0; + + for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : shardRoutingTable) { + if (shardRouting.unassigned()) { + totalUnassignedShards++; + indexUnassignedShards++; + } else { + totalAssignedShards++; + } + } + } + + unassignedShardsByIndex.put(index, indexUnassignedShards); + } + + assertEquals("Total assigned shards should be 20", 20, totalAssignedShards); + assertEquals("Total unassigned shards should be 2", 2, totalUnassignedShards); + + // Check unassigned shards for each index + assertEquals("test1 should have 2 unassigned shards", 2, unassignedShardsByIndex.get("test1").intValue()); + assertEquals("test2 should have 0 unassigned shards", 0, unassignedShardsByIndex.get("test2").intValue()); + assertEquals("test3 should have 0 unassigned shards", 0, unassignedShardsByIndex.get("test3").intValue()); + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testCombinedClusterAndIndexSpecificShardLimits() { + // Set the cluster-wide shard limit to 6 + updateClusterSetting(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 6); + + // Create the first index with 3 shards, 1 replica, and index-specific limit of 1 + Settings indexSettingsWithLimit = Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 3) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 1) + .build(); + createIndex("test1", indexSettingsWithLimit); + + // Create the second index with 4 shards and 1 replica + createIndex("test2", 
Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 4).put(SETTING_NUMBER_OF_REPLICAS, 1).build()); + + // Create the third index with 3 shards and 1 replica + createIndex("test3", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 1).build()); + + try { + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards + assertEquals("Total shards should be 20", 20, state.getRoutingTable().allShards().size()); + + int totalAssignedShards = 0; + int totalUnassignedShards = 0; + Map unassignedShardsByIndex = new HashMap<>(); + Map nodeShardCounts = new HashMap<>(); + Map<String, Set<String>> indexShardsPerNode = new HashMap<>(); + + for (RoutingNode routingNode : state.getRoutingNodes()) { + String nodeName = routingNode.node().getName(); + nodeShardCounts.put(nodeName, routingNode.numberOfOwningShards()); + indexShardsPerNode.put(nodeName, new HashSet<>()); + + for (ShardRouting shardRouting : routingNode) { + indexShardsPerNode.get(nodeName).add(shardRouting.getIndexName()); + } + } + + for (IndexRoutingTable indexRoutingTable : state.getRoutingTable()) { + String index = indexRoutingTable.getIndex().getName(); + int indexUnassignedShards = 0; + + for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : shardRoutingTable) { + if (shardRouting.unassigned()) { + totalUnassignedShards++; + indexUnassignedShards++; + } else { + totalAssignedShards++; + } + } + } + + unassignedShardsByIndex.put(index, indexUnassignedShards); + } + + assertEquals("Total assigned shards should be 17", 17, totalAssignedShards); + assertEquals("Total unassigned shards should be 3", 3, totalUnassignedShards); + assertEquals("test1 should have 3 unassigned shards", 3, unassignedShardsByIndex.get("test1").intValue()); + assertEquals("test2 should have 0 unassigned shards", 0, unassignedShardsByIndex.getOrDefault("test2", 0).intValue()); + assertEquals("test3 should have 0 unassigned shards", 0, unassignedShardsByIndex.getOrDefault("test3", 0).intValue()); + + // Check shard distribution across nodes + List shardCounts = new ArrayList<>(nodeShardCounts.values()); + Collections.sort(shardCounts, Collections.reverseOrder()); + assertEquals("Two nodes should have 6 shards", 6, shardCounts.get(0).intValue()); + assertEquals("Two nodes should have 6 shards", 6, shardCounts.get(1).intValue()); + assertEquals("One node should have 5 shards", 5, shardCounts.get(2).intValue()); + + // Check that every node holds a shard of the first index (its per-node limit is 1) + for (Set indexesOnNode : indexShardsPerNode.values()) { + assertTrue("Each node should have a shard from test1", indexesOnNode.contains("test1")); + } + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Integration test to verify the behavior of INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING + * in a non-remote store environment. + * + * Scenario: + * An end-user attempts to create an index with INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING + * on a cluster where remote store is not enabled. + * + * Expected Outcome: + * The system should reject the index creation request and throw an appropriate exception, + * indicating that this setting is only applicable for remote store enabled clusters.
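+ *
+ * Note: the same validation is applied when the setting arrives through an index template or an
+ * index settings update (see MetadataIndexTemplateService and MetadataUpdateSettingsService below).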
+ */ + public void testIndexTotalPrimaryShardsPerNodeSettingWithoutRemoteStore() { + // Attempt to create an index with INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING + Settings indexSettings = Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 3) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1) + .build(); + + // Assert that creating the index throws an exception + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> { createIndex("test_index", indexSettings); } + ); + + // Verify the exception message + assertTrue( + "Exception should mention that the setting requires remote store", + exception.getMessage() + .contains( + "Setting [index.routing.allocation.total_primary_shards_per_node] can only be used with remote store enabled clusters" + ) + ); + } + + /** + * Integration test to verify the behavior of CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING + * in a non-remote store environment. + * + * Scenario: + * An end-user attempts to update the cluster setting CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING + * on a cluster where remote store is not enabled. + * + * Expected Outcome: + * The system should reject the settings update and throw an appropriate exception, + * indicating that this setting is only applicable for remote store enabled clusters. + */ + public void testClusterTotalPrimaryShardsPerNodeSettingWithoutRemoteStore() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { + updateClusterSetting(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1); + }); + + // Verify the exception message + assertTrue( + "Exception should mention that the setting requires remote store", + exception.getMessage() + .contains( + "Setting [cluster.routing.allocation.total_primary_shards_per_node] can only be used with remote store enabled clusters" + ) + ); + + // Creating an index with the non-primary INDEX_TOTAL_SHARDS_PER_NODE_SETTING should still succeed + Settings indexSettings = Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 3) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 1) + .build(); + + createIndex("test_index", indexSettings); + } + + private void updateClusterSetting(String setting, int value) { + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(setting, value)).get(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java new file mode 100644 index 0000000000000..401db7790de92 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java @@ -0,0 +1,248 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.cluster.routing.allocation.decider; + +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.settings.Settings; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class ShardsLimitAllocationDeciderRemoteStoreEnabledIT extends RemoteStoreBaseIntegTestCase { + @Before + public void setup() { + setupCustomCluster(); + } + + private void setupCustomCluster() { + // Start cluster manager node first + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); + // Start data nodes + List dataNodes = internalCluster().startDataOnlyNodes(3); + // Wait for green cluster state + ensureGreen(); + } + + public void testIndexPrimaryShardLimit() throws Exception { + // Create first index with primary shard limit + Settings firstIndexSettings = Settings.builder() + .put(remoteStoreIndexSettings(0, 4)) // 4 shards, 0 replicas + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1) + .build(); + + // Create first index + createIndex("test1", firstIndexSettings); + + // Create second index + createIndex("test2", remoteStoreIndexSettings(0, 4)); + + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards (8 total: 4 from each index) + assertEquals("Total shards should be 8", 8, state.getRoutingTable().allShards().size()); + + // Count assigned and unassigned shards for test1 + int test1AssignedShards = 0; + int test1UnassignedShards = 0; + Map nodePrimaryCount = new HashMap<>(); + + // Check test1 shard distribution + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test1")) { + for (ShardRouting shard : shardRouting) { + if (shard.assignedToNode()) { + test1AssignedShards++; + // Count primaries per node for test1 + String nodeId = shard.currentNodeId(); + nodePrimaryCount.merge(nodeId, 1, Integer::sum); + } else { + test1UnassignedShards++; + } + } + } + + // Check test2 shard assignment + int test2UnassignedShards = 0; + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test2")) { + for (ShardRouting shard : shardRouting) { + if (!shard.assignedToNode()) { + test2UnassignedShards++; + } + } + } + + // Assertions + assertEquals("test1 should have 3 assigned shards", 3, test1AssignedShards); + assertEquals("test1 should have 1 unassigned shard", 1, test1UnassignedShards); + assertEquals("test2 should have no unassigned shards", 0, test2UnassignedShards); + + // Verify no node has more than one primary shard of test1 + for (Integer count : nodePrimaryCount.values()) { + assertTrue("No node should have more than 1 primary shard of test1", count <= 1); + } + }); + } + + public void testClusterPrimaryShardLimits() throws Exception { + // Update cluster setting to limit primary shards per node +
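// With three data nodes and a limit of one primary per node, only three of the four primaries can be assigned; the assertions below verify the resulting 6 assigned / 2 unassigned split. +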
updateClusterSetting(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1); + + // Create index with 4 shards and 1 replica + createIndex("test1", remoteStoreIndexSettings(1, 4)); + + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards (8 total: 4 primaries + 4 replicas) + assertEquals("Total shards should be 8", 8, state.getRoutingTable().allShards().size()); + + // Count assigned and unassigned shards for test1 + int assignedShards = 0; + int unassignedShards = 0; + int unassignedPrimaries = 0; + int unassignedReplicas = 0; + Map nodePrimaryCount = new HashMap<>(); + + // Check shard distribution + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test1")) { + for (ShardRouting shard : shardRouting) { + if (shard.assignedToNode()) { + assignedShards++; + if (shard.primary()) { + // Count primaries per node + String nodeId = shard.currentNodeId(); + nodePrimaryCount.merge(nodeId, 1, Integer::sum); + } + } else { + unassignedShards++; + if (shard.primary()) { + unassignedPrimaries++; + } else { + unassignedReplicas++; + } + } + } + } + + // Assertions + assertEquals("Should have 6 assigned shards", 6, assignedShards); + assertEquals("Should have 2 unassigned shards", 2, unassignedShards); + assertEquals("Should have 1 unassigned primary", 1, unassignedPrimaries); + assertEquals("Should have 1 unassigned replica", 1, unassignedReplicas); + + // Verify no node has more than one primary shard + for (Integer count : nodePrimaryCount.values()) { + assertTrue("No node should have more than 1 primary shard", count <= 1); + } + }); + } + + public void testCombinedIndexAndClusterPrimaryShardLimits() throws Exception { + // Set cluster-wide primary shard limit to 3 + updateClusterSetting(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 3); + + // Create first index with index-level primary shard limit + Settings firstIndexSettings = Settings.builder() + .put(remoteStoreIndexSettings(1, 4)) // 4 shards, 1 replica + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1) + .build(); + + // Create first index + createIndex("test1", firstIndexSettings); + + // Create second index with no index-level limits + createIndex("test2", remoteStoreIndexSettings(1, 4)); // 4 shards, 1 replica + + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards (16 total: 8 from each index - 4 primaries + 4 replicas each) + assertEquals("Total shards should be 16", 16, state.getRoutingTable().allShards().size()); + + // Count assigned and unassigned shards for both indices + int totalAssignedShards = 0; + int test1UnassignedPrimaries = 0; + int test1UnassignedReplicas = 0; + int test2UnassignedShards = 0; + Map nodePrimaryCount = new HashMap<>(); + + // Check test1 shard distribution + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test1")) { + for (ShardRouting shard : shardRouting) { + if (shard.assignedToNode()) { + totalAssignedShards++; + if (shard.primary()) { + String nodeId = shard.currentNodeId(); + nodePrimaryCount.merge(nodeId, 1, Integer::sum); + } + } else { + if (shard.primary()) { + test1UnassignedPrimaries++; + } else { + test1UnassignedReplicas++; + } + } + } + } + + // Check test2 shard distribution + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test2")) { + for (ShardRouting shard : shardRouting) { + if (shard.assignedToNode()) { + 
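// Tally every assigned shard, attributing assigned primaries of test1 to the node that holds them +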
totalAssignedShards++; + if (shard.primary()) { + String nodeId = shard.currentNodeId(); + nodePrimaryCount.merge(nodeId, 1, Integer::sum); + } + } else { + test2UnassignedShards++; + } + } + } + + // Assertions + assertEquals("Should have 14 assigned shards", 14, totalAssignedShards); + assertEquals("Should have 1 unassigned primary in test1", 1, test1UnassignedPrimaries); + assertEquals("Should have 1 unassigned replica in test1", 1, test1UnassignedReplicas); + assertEquals("Should have no unassigned shards in test2", 0, test2UnassignedShards); + + // Verify no node has more than one primary shard for test1 + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test1")) { + Map test1NodePrimaryCount = new HashMap<>(); + for (ShardRouting shard : shardRouting) { + if (shard.assignedToNode() && shard.primary()) { + test1NodePrimaryCount.merge(shard.currentNodeId(), 1, Integer::sum); + } + } + for (Integer count : test1NodePrimaryCount.values()) { + assertTrue("No node should have more than 1 primary shard of test1", count <= 1); + } + } + + // Verify no node has more than three primary shards total (cluster-wide limit) + for (Integer count : nodePrimaryCount.values()) { + assertTrue("No node should have more than 3 primary shards total", count <= 3); + } + }); + } + + private void updateClusterSetting(String setting, int value) { + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(setting, value)).get(); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 3988d50b2ce1e..60c04d5a620f8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -64,6 +64,7 @@ import java.io.IOException; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; import static org.opensearch.index.remote.RemoteStoreUtils.checkAndFinalizeRemoteStoreMigration; /** @@ -257,6 +258,7 @@ public void onFailure(String source, Exception e) { @Override public ClusterState execute(final ClusterState currentState) { + validateClusterTotalPrimaryShardsPerNodeSetting(currentState, request); boolean isCompatibilityModeChanging = validateCompatibilityModeSettingRequest(request, state); ClusterState clusterState = updater.updateSettings( currentState, @@ -324,4 +326,34 @@ private void validateAllNodesOfSameType(DiscoveryNodes discoveryNodes) { ); } } + + private void validateClusterTotalPrimaryShardsPerNodeSetting(ClusterState currentState, ClusterUpdateSettingsRequest request) { + if (request.transientSettings().hasValue(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey()) + || request.persistentSettings().hasValue(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey())) { + + Settings settings = Settings.builder().put(request.transientSettings()).put(request.persistentSettings()).build(); + + int newValue = CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get(settings); + + // If default value (-1), no validation needed + if (newValue == -1) { + return; + } + + // Check current state + boolean allNodesRemoteStoreEnabled = currentState.nodes() + .getNodes() + .values() + .stream() + .allMatch(discoveryNode -> 
discoveryNode.isRemoteStoreNode()); + + if (!allNodesRemoteStoreEnabled) { + throw new IllegalArgumentException( + "Setting [" + + CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey() + + "] can only be used with remote store enabled clusters" + ); + } + } + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index d4fcadc4ac56d..cabea0efe8433 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -856,6 +856,7 @@ public Iterator> settings() { private final boolean isRemoteSnapshot; private final int indexTotalShardsPerNodeLimit; + private final int indexTotalPrimaryShardsPerNodeLimit; private final boolean isAppendOnlyIndex; private final Context context; @@ -888,6 +889,7 @@ private IndexMetadata( final Map rolloverInfos, final boolean isSystem, final int indexTotalShardsPerNodeLimit, + final int indexTotalPrimaryShardsPerNodeLimit, boolean isAppendOnlyIndex, final Context context ) { @@ -926,6 +928,7 @@ private IndexMetadata( this.isSystem = isSystem; this.isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); this.indexTotalShardsPerNodeLimit = indexTotalShardsPerNodeLimit; + this.indexTotalPrimaryShardsPerNodeLimit = indexTotalPrimaryShardsPerNodeLimit; this.isAppendOnlyIndex = isAppendOnlyIndex; this.context = context; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; @@ -1115,6 +1118,10 @@ public int getIndexTotalShardsPerNodeLimit() { return this.indexTotalShardsPerNodeLimit; } + public int getIndexTotalPrimaryShardsPerNodeLimit() { + return this.indexTotalPrimaryShardsPerNodeLimit; + } + public boolean isAppendOnlyIndex() { return this.isAppendOnlyIndex; } @@ -1913,6 +1920,9 @@ public IndexMetadata build() { } final int indexTotalShardsPerNodeLimit = ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); + final int indexTotalPrimaryShardsPerNodeLimit = ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get( + settings + ); final boolean isAppendOnlyIndex = INDEX_APPEND_ONLY_ENABLED_SETTING.get(settings); final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); @@ -1945,6 +1955,7 @@ public IndexMetadata build() { rolloverInfos, isSystem, indexTotalShardsPerNodeLimit, + indexTotalPrimaryShardsPerNodeLimit, isAppendOnlyIndex, context ); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index a2e1ca440512d..a81fe01f0e7f4 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -155,6 +155,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.Metadata.DEFAULT_REPLICA_COUNT_SETTING; import static org.opensearch.cluster.metadata.MetadataIndexTemplateService.findContextTemplateName; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static 
org.opensearch.index.IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; @@ -1094,6 +1095,7 @@ static Settings aggregateIndexSettings( if (FeatureFlags.isEnabled(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING)) { validateSearchOnlyReplicasSettings(indexSettings); } + validateIndexTotalPrimaryShardsPerNodeSetting(indexSettings); return indexSettings; } @@ -1844,6 +1846,28 @@ public static void validateRefreshIntervalSettings(Settings requestSettings, Clu } } + + /** + * Validates {@code index.routing.allocation.total_primary_shards_per_node} is only set for remote store enabled clusters + */ + // TODO: Update this check for SegRep to DocRep migration on need basis + public static void validateIndexTotalPrimaryShardsPerNodeSetting(Settings indexSettings) { + // Get the setting value + int indexPrimaryShardsPerNode = INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get(indexSettings); + + // If default value (-1), no validation needed + if (indexPrimaryShardsPerNode == -1) { + return; + } + + // Check if remote store is enabled + boolean isRemoteStoreEnabled = IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings); + if (!isRemoteStoreEnabled) { + throw new IllegalArgumentException( + "Setting [" + INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey() + "] can only be used with remote store enabled clusters" + ); + } + } + + /** * Validates {@code index.translog.durability} is not async if the {@code cluster.remote_store.index.restrict.async-durability} is set to true. * diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 5d20388b74e1f..b032ade720612 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -101,6 +101,7 @@ import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateIndexTotalPrimaryShardsPerNodeSetting; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex; import static org.opensearch.common.util.concurrent.ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME; @@ -1642,6 +1643,9 @@ private void validate(String name, @Nullable Settings settings, List ind validateRefreshIntervalSettings(settings, clusterService.getClusterSettings()); validateTranslogFlushIntervalSettingsForCompositeIndex(settings, clusterService.getClusterSettings()); validateTranslogDurabilitySettingsInTemplate(settings, clusterService.getClusterSettings()); + + // validate index total primary shards per node setting + validateIndexTotalPrimaryShardsPerNodeSetting(settings); } if (indexPatterns.stream().anyMatch(Regex::isMatchAllPattern)) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index a35af0e607c31..eb10fd5d04288 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++
b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -78,6 +78,7 @@ import static org.opensearch.action.support.ContextPreservingActionListener.wrapPreservingContext; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateIndexTotalPrimaryShardsPerNodeSetting; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateOverlap; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogDurabilitySettings; @@ -139,6 +140,7 @@ public void updateSettings( validateRefreshIntervalSettings(normalizedSettings, clusterService.getClusterSettings()); validateTranslogDurabilitySettings(normalizedSettings, clusterService.getClusterSettings(), clusterService.getSettings()); + validateIndexTotalPrimaryShardsPerNodeSetting(normalizedSettings); final int defaultReplicaCount = clusterService.getClusterSettings().get(Metadata.DEFAULT_REPLICA_COUNT_SETTING); Settings.Builder settingsForClosedIndices = Settings.builder(); diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java index 24c3077960444..15f1b99ac2754 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java @@ -124,6 +124,62 @@ public Iterator iterator() { Collections.unmodifiableCollection(this.shardTuple.v2().values()).stream() ).iterator(); } + + public int numberOfPrimaryShards() { + return this.shardTuple.v1().size(); + } + } + + static class RelocatingShardsBucket { + private final LinkedHashSet relocatingShards; + private final LinkedHashSet relocatingPrimaryShards; + + RelocatingShardsBucket() { + relocatingShards = new LinkedHashSet<>(); + relocatingPrimaryShards = new LinkedHashSet<>(); + } + + public boolean add(ShardRouting shard) { + boolean res = relocatingShards.add(shard); + if (shard.primary()) { + relocatingPrimaryShards.add(shard); + } + return res; + } + + public boolean remove(ShardRouting shard) { + boolean res = relocatingShards.remove(shard); + relocatingPrimaryShards.remove(shard); + return res; + } + + public int size() { + return relocatingShards.size(); + } + + public int primarySize() { + return relocatingPrimaryShards.size(); + } + + public Set getRelocatingShards() { + return Collections.unmodifiableSet(relocatingShards); + } + + public Set getRelocatingPrimaryShards() { + return Collections.unmodifiableSet(relocatingPrimaryShards); + } + + public List getRelocatingShardsList() { + return new ArrayList<>(relocatingShards); + } + + // For assertions/verification + public boolean invariant() { + assert relocatingShards.containsAll(relocatingPrimaryShards); + assert relocatingPrimaryShards.stream().allMatch(ShardRouting::primary); + assert relocatingPrimaryShards.size() == relocatingShards.stream().filter(ShardRouting::primary).count(); + return true; + } } private final String nodeId; @@ -132,9 +188,9 @@ public Iterator iterator() { private final BucketedShards shards; - private final LinkedHashSet initializingShards; + private final RelocatingShardsBucket relocatingShardsBucket; - private final LinkedHashSet relocatingShards; 
+ private final LinkedHashSet initializingShards; private final HashMap<Index, LinkedHashSet<ShardRouting>> shardsByIndex; @@ -144,7 +200,7 @@ public RoutingNode(String nodeId, DiscoveryNode node, ShardRouting... shardRouti final LinkedHashMap primaryShards = new LinkedHashMap<>(); final LinkedHashMap replicaShards = new LinkedHashMap<>(); this.shards = new BucketedShards(primaryShards, replicaShards); - this.relocatingShards = new LinkedHashSet<>(); + this.relocatingShardsBucket = new RelocatingShardsBucket(); this.initializingShards = new LinkedHashSet<>(); this.shardsByIndex = new LinkedHashMap<>(); @@ -152,7 +208,7 @@ public RoutingNode(String nodeId, DiscoveryNode node, ShardRouting... shardRouti if (shardRouting.initializing()) { initializingShards.add(shardRouting); } else if (shardRouting.relocating()) { - relocatingShards.add(shardRouting); + relocatingShardsBucket.add(shardRouting); } shardsByIndex.computeIfAbsent(shardRouting.index(), k -> new LinkedHashSet<>()).add(shardRouting); @@ -231,7 +287,7 @@ void add(ShardRouting shard) { if (shard.initializing()) { initializingShards.add(shard); } else if (shard.relocating()) { - relocatingShards.add(shard); + relocatingShardsBucket.add(shard); } shardsByIndex.computeIfAbsent(shard.index(), k -> new LinkedHashSet<>()).add(shard); assert invariant(); @@ -251,7 +307,7 @@ void update(ShardRouting oldShard, ShardRouting newShard) { boolean exist = initializingShards.remove(oldShard); assert exist : "expected shard " + oldShard + " to exist in initializingShards"; } else if (oldShard.relocating()) { - boolean exist = relocatingShards.remove(oldShard); + boolean exist = relocatingShardsBucket.remove(oldShard); assert exist : "expected shard " + oldShard + " to exist in relocatingShards"; } shardsByIndex.get(oldShard.index()).remove(oldShard); @@ -261,7 +317,7 @@ void update(ShardRouting oldShard, ShardRouting newShard) { if (newShard.initializing()) { initializingShards.add(newShard); } else if (newShard.relocating()) { - relocatingShards.add(newShard); + relocatingShardsBucket.add(newShard); } shardsByIndex.computeIfAbsent(newShard.index(), k -> new LinkedHashSet<>()).add(newShard); assert invariant(); @@ -275,7 +331,7 @@ void remove(ShardRouting shard) { boolean exist = initializingShards.remove(shard); assert exist : "expected shard " + shard + " to exist in initializingShards"; } else if (shard.relocating()) { - boolean exist = relocatingShards.remove(shard); + boolean exist = relocatingShardsBucket.remove(shard); assert exist : "expected shard " + shard + " to exist in relocatingShards"; } shardsByIndex.get(shard.index()).remove(shard); @@ -295,7 +351,7 @@ public int numberOfShardsWithState(ShardRoutingState... states) { if (states[0] == ShardRoutingState.INITIALIZING) { return initializingShards.size(); } else if (states[0] == ShardRoutingState.RELOCATING) { - return relocatingShards.size(); + return relocatingShardsBucket.size(); } } @@ -320,7 +376,7 @@ public List shardsWithState(ShardRoutingState... states) { if (states[0] == ShardRoutingState.INITIALIZING) { return new ArrayList<>(initializingShards); } else if (states[0] == ShardRoutingState.RELOCATING) { - return new ArrayList<>(relocatingShards); + return relocatingShardsBucket.getRelocatingShardsList(); } } @@ -354,7 +410,7 @@ public List shardsWithState(String index, ShardRoutingState...
sta } return shards; } else if (states[0] == ShardRoutingState.RELOCATING) { - for (ShardRouting shardEntry : relocatingShards) { + for (ShardRouting shardEntry : relocatingShardsBucket.getRelocatingShards()) { if (shardEntry.getIndexName().equals(index) == false) { continue; } @@ -381,7 +437,11 @@ public List shardsWithState(String index, ShardRoutingState... sta * The number of shards on this node that will not be eventually relocated. */ public int numberOfOwningShards() { - return shards.size() - relocatingShards.size(); + return shards.size() - relocatingShardsBucket.size(); + } + + public int numberOfOwningPrimaryShards() { + return shards.numberOfPrimaryShards() - relocatingShardsBucket.primarySize(); } public int numberOfOwningShardsForIndex(final Index index) { @@ -393,6 +453,20 @@ public int numberOfOwningShardsForIndex(final Index index) { } } + + public int numberOfOwningPrimaryShardsForIndex(final Index index) { + final LinkedHashSet shardRoutings = shardsByIndex.get(index); + if (shardRoutings == null) { + return 0; + } else { + return Math.toIntExact( + shardRoutings.stream() + .filter(sr -> sr.relocating() == false) + .filter(ShardRouting::primary) // keep only primary shards + .count() + ); + } + } + + public String prettyPrint() { StringBuilder sb = new StringBuilder(); sb.append("-----node_id[").append(nodeId).append("][").append(node == null ? "X" : "V").append("]\n"); @@ -441,8 +515,19 @@ private boolean invariant() { Collection shardRoutingsRelocating = StreamSupport.stream(shards.spliterator(), false) .filter(ShardRouting::relocating) .collect(Collectors.toList()); - assert relocatingShards.size() == shardRoutingsRelocating.size(); - assert relocatingShards.containsAll(shardRoutingsRelocating); + assert relocatingShardsBucket.getRelocatingShards().size() == shardRoutingsRelocating.size(); + assert relocatingShardsBucket.getRelocatingShards().containsAll(shardRoutingsRelocating); + + // relocatingPrimaryShards must be consistent with primary shards that are relocating + Collection primaryShardRoutingsRelocating = StreamSupport.stream(shards.spliterator(), false) + .filter(ShardRouting::relocating) + .filter(ShardRouting::primary) + .collect(Collectors.toList()); + assert relocatingShardsBucket.getRelocatingPrimaryShards().size() == primaryShardRoutingsRelocating.size(); + assert relocatingShardsBucket.getRelocatingPrimaryShards().containsAll(primaryShardRoutingsRelocating); + + // relocatingPrimaryShards and relocatingShards should be consistent + assert relocatingShardsBucket.invariant(); final Map<Index, Set<ShardRouting>> shardRoutingsByIndex = StreamSupport.stream(shards.spliterator(), false) .collect(Collectors.groupingBy(ShardRouting::index, Collectors.toSet())); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index 6f211f370de95..ad77aed4e4fd5 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.routing.allocation.decider; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; @@ -46,13 +47,14 @@ /** * This {@link
AllocationDecider} limits the number of shards per node on a per * index or node-wide basis. The allocator prevents a single node from holding more - than {@code index.routing.allocation.total_shards_per_node} per index and - {@code cluster.routing.allocation.total_shards_per_node} globally during the allocation + than {@code index.routing.allocation.total_shards_per_node} per index, {@code index.routing.allocation.total_primary_shards_per_node} per index, + * {@code cluster.routing.allocation.total_shards_per_node} globally and + * {@code cluster.routing.allocation.total_primary_shards_per_node} globally during the allocation * process. The limits of this decider can be changed in real-time via the * index settings API. *

- * If {@code index.routing.allocation.total_shards_per_node} is reset to a negative value shards - * per index are unlimited per node. Shards currently in the + * If {@code index.routing.allocation.total_shards_per_node} or {@code index.routing.allocation.total_primary_shards_per_node} is reset to a negative value, shards + * per index or primary shards per index, respectively, are unlimited per node. Shards currently in the * {@link ShardRoutingState#RELOCATING relocating} state are ignored by this * {@link AllocationDecider} until the shard changed its state to either * {@link ShardRoutingState#STARTED started}, @@ -70,6 +72,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { public static final String NAME = "shards_limit"; private volatile int clusterShardLimit; + private volatile int clusterPrimaryShardLimit; /** * Controls the maximum number of shards per index on a single OpenSearch @@ -84,7 +87,19 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { ); /** - * Controls the maximum number of shards per node on a global level. + * Controls the maximum number of primary shards per index on a single OpenSearch + * node for segment replication enabled indices. Negative values are interpreted as unlimited. + */ + public static final Setting INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING = Setting.intSetting( + "index.routing.allocation.total_primary_shards_per_node", + -1, + -1, + Property.Dynamic, + Property.IndexScope + ); + + /** + * Controls the maximum number of shards per node on a cluster level. * Negative values are interpreted as unlimited. + */ + public static final Setting CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting( @@ -95,18 +110,36 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { Property.NodeScope ); + /** + * Controls the maximum number of primary shards per node on a cluster level. + * Negative values are interpreted as unlimited.
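+ * For example, a value of 1 allows at most one primary shard on each data node across all indices.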
+ */ + public static final Setting CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING = Setting.intSetting( + "cluster.routing.allocation.total_primary_shards_per_node", + -1, + -1, + Property.Dynamic, + Property.NodeScope + ); + private final Settings settings; public ShardsLimitAllocationDecider(Settings settings, ClusterSettings clusterSettings) { this.settings = settings; this.clusterShardLimit = CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); + this.clusterPrimaryShardLimit = CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING, this::setClusterPrimaryShardLimit); } private void setClusterShardLimit(int clusterShardLimit) { this.clusterShardLimit = clusterShardLimit; } + private void setClusterPrimaryShardLimit(int clusterPrimaryShardLimit) { + this.clusterPrimaryShardLimit = clusterPrimaryShardLimit; + } + @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return doDecide(shardRouting, node, allocation, (count, limit) -> count >= limit); @@ -115,7 +148,6 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing @Override public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return doDecide(shardRouting, node, allocation, (count, limit) -> count > limit); - } private Decision doDecide( @@ -124,18 +156,22 @@ private Decision doDecide( RoutingAllocation allocation, BiPredicate decider ) { - final int indexShardLimit = allocation.metadata().getIndexSafe(shardRouting.index()).getIndexTotalShardsPerNodeLimit(); + IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index()); + final int indexShardLimit = indexMetadata.getIndexTotalShardsPerNodeLimit(); + final int indexPrimaryShardLimit = indexMetadata.getIndexTotalPrimaryShardsPerNodeLimit(); // Capture the limit here in case it changes during this method's // execution final int clusterShardLimit = this.clusterShardLimit; - - if (indexShardLimit <= 0 && clusterShardLimit <= 0) { + final int clusterPrimaryShardLimit = this.clusterPrimaryShardLimit; + if (indexShardLimit <= 0 && indexPrimaryShardLimit <= 0 && clusterShardLimit <= 0 && clusterPrimaryShardLimit <= 0) { return allocation.decision( Decision.YES, NAME, - "total shard limits are disabled: [index: %d, cluster: %d] <= 0", + "total shard limits are disabled: [index: %d, index primary: %d, cluster: %d, cluster primary: %d] <= 0", indexShardLimit, - clusterShardLimit + indexPrimaryShardLimit, + clusterShardLimit, + clusterPrimaryShardLimit ); } @@ -151,6 +187,19 @@ private Decision doDecide( clusterShardLimit ); } + if (shardRouting.primary() && clusterPrimaryShardLimit > 0) { + final int nodePrimaryShardCount = node.numberOfOwningPrimaryShards(); + if (decider.test(nodePrimaryShardCount, clusterPrimaryShardLimit)) { + return allocation.decision( + Decision.NO, + NAME, + "too many primary shards [%d] allocated to this node, cluster setting [%s=%d]", + nodePrimaryShardCount, + CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), + clusterPrimaryShardLimit + ); + } + } if (indexShardLimit > 0) { final int indexShardCount = node.numberOfOwningShardsForIndex(shardRouting.index()); if (decider.test(indexShardCount, indexShardLimit)) { @@ -165,13 +214,29 @@ private Decision doDecide( ); } } + if (indexPrimaryShardLimit > 
0 && shardRouting.primary()) { + final int indexPrimaryShardCount = node.numberOfOwningPrimaryShardsForIndex(shardRouting.index()); + if (decider.test(indexPrimaryShardCount, indexPrimaryShardLimit)) { + return allocation.decision( + Decision.NO, + NAME, + "too many primary shards [%d] allocated to this node for index [%s], index setting [%s=%d]", + indexPrimaryShardCount, + shardRouting.getIndexName(), + INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), + indexPrimaryShardLimit + ); + } + } return allocation.decision( Decision.YES, NAME, - "the shard count [%d] for this node is under the index limit [%d] and cluster level node limit [%d]", + "the shard count [%d] for this node is under the index limit [%d], index primary limit [%d], cluster level node limit [%d] and cluster level primary node limit [%d]", nodeShardCount, indexShardLimit, - clusterShardLimit + indexPrimaryShardLimit, + clusterShardLimit, + clusterPrimaryShardLimit ); } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index d204c383524c2..b4b85e0a9d367 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -433,6 +433,7 @@ public void apply(Settings value, Settings current, Settings previous) { SniffConnectionStrategy.REMOTE_NODE_CONNECTIONS, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, + ShardsLimitAllocationDecider.CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING, NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING, HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 946d7fe734deb..dc77ffd720bad 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -166,6 +166,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.MAX_SLICES_PER_PIT, IndexSettings.MAX_REGEX_LENGTH_SETTING, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING, + ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING, IndexSettings.INDEX_GC_DELETES_SETTING, IndexSettings.INDEX_SOFT_DELETES_SETTING, IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index cc35426ee15b8..dfe3928ac37f3 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -155,6 +155,7 @@ import static org.opensearch.cluster.metadata.MetadataCreateIndexService.getIndexNumberOfRoutingShards; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.parseV1Mappings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.resolveAndValidateAliases; +import static 
org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.opensearch.index.IndexSettings.INDEX_MERGE_POLICY; @@ -2548,6 +2549,96 @@ public void testApplyContextWithSettingsOverlap() throws IOException { }); } + public void testIndexTotalPrimaryShardsPerNodeSettingValidationWithRemoteStore() { + // Test case where setting is used with remote store enabled (should succeed) + Settings settings = Settings.builder().build(); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + + final Settings.Builder requestSettings = Settings.builder() + // Enable remote store + .put(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.getKey(), true) + // Set primary shards per node to valid value + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 2) + .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT.toString()); + + request.settings(requestSettings.build()); + + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + settings, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + + // Verify that the value is the same as set earlier and validation was successful + assertEquals(Integer.valueOf(2), INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get(indexSettings)); + } + + public void testIndexTotalPrimaryShardsPerNodeSettingValidationWithoutRemoteStore() { + // Test case where setting is used without remote store (should fail) + Settings settings = Settings.builder().build(); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + + final Settings.Builder requestSettings = Settings.builder() + // Remote store not enabled + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 2) + .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT.toString()); + + request.settings(requestSettings.build()); + + // Expect IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + settings, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ) + ); + + // Verify error message + assertEquals( + "Setting [index.routing.allocation.total_primary_shards_per_node] can only be used with remote store enabled clusters", + exception.getMessage() + ); + } + + public void testIndexTotalPrimaryShardsPerNodeSettingValidationWithDefaultValue() { + // Test case with default value (-1) without remote store (should succeed) + Settings settings = Settings.builder().build(); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + + final Settings.Builder requestSettings = Settings.builder().put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), -1); + + request.settings(requestSettings.build()); + + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + settings, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + + // Verify that default value 
passes validation + assertEquals(Integer.valueOf(-1), INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get(indexSettings)); + } + private IndexTemplateMetadata addMatchingTemplate(Consumer configurator) { IndexTemplateMetadata.Builder builder = templateMetadataBuilder("template1", "te*"); configurator.accept(builder); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 05ae67d10f4cb..795d1713772c2 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -69,6 +69,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexTemplateException; import org.opensearch.indices.SystemIndices; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.ThreadPool; @@ -92,6 +93,7 @@ import static java.util.Collections.singletonList; import static org.opensearch.cluster.applicationtemplates.ClusterStateSystemTemplateLoader.TEMPLATE_LOADER_IDENTIFIER; import static org.opensearch.cluster.applicationtemplates.SystemTemplateMetadata.fromComponentTemplateInfo; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; import static org.opensearch.common.settings.Settings.builder; import static org.opensearch.common.util.concurrent.ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME; import static org.opensearch.env.Environment.PATH_HOME_SETTING; @@ -2440,6 +2442,23 @@ public void testMaxTranslogFlushSizeWithCompositeIndex() { assertThat(throwables.get(0), instanceOf(IllegalArgumentException.class)); } + public void testIndexPrimaryShardsSetting() { + Settings clusterSettings = Settings.builder().build(); + PutRequest request = new PutRequest("test", "test_index_primary_shard_constraint"); + request.patterns(singletonList("test_shards_wait*")); + Settings.Builder settingsBuilder = builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 2) + .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT.toString()); + request.settings(settingsBuilder.build()); + List throwables = putTemplate(xContentRegistry(), request, clusterSettings); + assertThat(throwables.get(0), instanceOf(IllegalArgumentException.class)); + assertEquals( + "Setting [index.routing.allocation.total_primary_shards_per_node] can only be used with remote store enabled clusters", + throwables.get(0).getMessage() + ); + } + private static List putTemplate(NamedXContentRegistry xContentRegistry, PutRequest request) { return putTemplate(xContentRegistry, request, Settings.EMPTY); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java index cc4f2e510cb31..c78e5582155d1 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java @@ -165,6 +165,32 @@ public void testNumberOfOwningShards() { assertThat(routingNode.numberOfOwningShards(), equalTo(2)); } 
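+ // Owning primaries are the primaries on a node minus primaries relocating away: below, test1[0] and
+ // test3[1] are started primaries while test3[0] is relocating off node-1, so the expected count is 2.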
+ public void testNumberOfOwningPrimaryShards() { + final ShardRouting test1Shard0 = TestShardRouting.newShardRouting("test1", 0, "node-1", true, ShardRoutingState.STARTED); + final ShardRouting test2Shard0 = TestShardRouting.newShardRouting( + "test2", + 0, + "node-1", + "node-2", + false, + ShardRoutingState.RELOCATING + ); + final ShardRouting test3Shard0 = TestShardRouting.newShardRouting( + "test3", + 0, + "node-1", + "node-2", + true, + ShardRoutingState.RELOCATING + ); + final ShardRouting test3Shard1 = TestShardRouting.newShardRouting("test3", 1, "node-1", true, ShardRoutingState.STARTED); + routingNode.add(test1Shard0); + routingNode.add(test2Shard0); + routingNode.add(test3Shard0); + routingNode.add(test3Shard1); + assertThat(routingNode.numberOfOwningPrimaryShards(), equalTo(2)); + } + public void testNumberOfOwningShardsForIndex() { final ShardRouting test1Shard0 = TestShardRouting.newShardRouting("test1", 0, "node-1", false, ShardRoutingState.STARTED); final ShardRouting test2Shard0 = TestShardRouting.newShardRouting( @@ -183,4 +209,33 @@ public void testNumberOfOwningShardsForIndex() { assertThat(routingNode.numberOfOwningShardsForIndex(new Index("test3", IndexMetadata.INDEX_UUID_NA_VALUE)), equalTo(0)); } + public void testNumberOfOwningPrimaryShardsForIndex() { + final ShardRouting test1Shard0 = TestShardRouting.newShardRouting("test1", 0, "node-1", true, ShardRoutingState.STARTED); + final ShardRouting test2Shard0 = TestShardRouting.newShardRouting( + "test2", + 0, + "node-1", + "node-2", + false, + ShardRoutingState.RELOCATING + ); + final ShardRouting test3Shard0 = TestShardRouting.newShardRouting( + "test3", + 0, + "node-1", + "node-2", + true, + ShardRoutingState.RELOCATING + ); + final ShardRouting test3Shard1 = TestShardRouting.newShardRouting("test3", 1, "node-1", true, ShardRoutingState.STARTED); + routingNode.add(test1Shard0); + routingNode.add(test2Shard0); + routingNode.add(test3Shard0); + routingNode.add(test3Shard1); + assertThat(routingNode.numberOfOwningPrimaryShardsForIndex(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE)), equalTo(0)); + assertThat(routingNode.numberOfOwningPrimaryShardsForIndex(new Index("test1", IndexMetadata.INDEX_UUID_NA_VALUE)), equalTo(1)); + assertThat(routingNode.numberOfOwningPrimaryShardsForIndex(new Index("test2", IndexMetadata.INDEX_UUID_NA_VALUE)), equalTo(0)); + assertThat(routingNode.numberOfOwningPrimaryShardsForIndex(new Index("test3", IndexMetadata.INDEX_UUID_NA_VALUE)), equalTo(1)); + } + } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderTests.java new file mode 100644 index 0000000000000..ffc42d11d3696 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderTests.java @@ -0,0 +1,349 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.routing.allocation.decider; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchTestCase; + +import static org.opensearch.cluster.routing.allocation.decider.Decision.Type.NO; +import static org.opensearch.cluster.routing.allocation.decider.Decision.Type.YES; + +public class ShardsLimitAllocationDeciderTests extends OpenSearchTestCase { + + public void testWithNoLimit() { + Settings settings = Settings.builder().build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ShardsLimitAllocationDecider decider = new ShardsLimitAllocationDecider(settings, clusterSettings); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(0)) + .build(); + + // Create a RoutingTable with shards 0 and 1 initialized on node1, and shard 2 unassigned + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(metadata.index("test").getIndex()); + + // Shard 0 and 1: STARTED on node1 + indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED)); + indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test", 1, "node1", null, true, ShardRoutingState.STARTED)); + + // Shard 2: Unassigned + indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + + routingTableBuilder.add(indexRoutingTableBuilder.build()); + RoutingTable routingTable = routingTableBuilder.build(); + + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + + RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + allocation.debugDecision(true); + + ShardRouting shard1 = routingTable.index("test").shard(0).primaryShard(); + ShardRouting shard2 = routingTable.index("test").shard(1).primaryShard(); + ShardRouting shard3 = routingTable.index("test").shard(2).primaryShard(); + + // Test allocation decisions + assertEquals(YES, decider.canAllocate(shard3, clusterState.getRoutingNodes().node("node1"), allocation).type()); + assertEquals(YES, decider.canRemain(shard1, clusterState.getRoutingNodes().node("node1"), allocation).type()); + assertEquals(YES, decider.canAllocate(shard3, clusterState.getRoutingNodes().node("node2"), allocation).type()); + } + + public 
void testClusterShardLimit() { + Settings settings = Settings.builder().put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 2).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ShardsLimitAllocationDecider decider = new ShardsLimitAllocationDecider(settings, clusterSettings); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(0)) + .build(); + + // Create a RoutingTable with shards 0 and 1 initialized on node1, and shard 2 unassigned + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(metadata.index("test").getIndex()); + + // Shard 0 and 1: STARTED on node1 + indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED)); + indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test", 1, "node1", null, true, ShardRoutingState.STARTED)); + + // Shard 2: Unassigned + indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + + routingTableBuilder.add(indexRoutingTableBuilder.build()); + RoutingTable routingTable = routingTableBuilder.build(); + + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + + RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + allocation.debugDecision(true); + + ShardRouting shard1 = routingTable.index("test").shard(0).primaryShard(); + ShardRouting shard2 = routingTable.index("test").shard(1).primaryShard(); + ShardRouting shard3 = routingTable.index("test").shard(2).primaryShard(); + + // Test allocation decisions + assertEquals(NO, decider.canAllocate(shard3, clusterState.getRoutingNodes().node("node1"), allocation).type()); + assertEquals(YES, decider.canRemain(shard1, clusterState.getRoutingNodes().node("node1"), allocation).type()); + assertEquals(YES, decider.canAllocate(shard3, clusterState.getRoutingNodes().node("node2"), allocation).type()); + } + + public void testClusterPrimaryShardLimit() { + Settings settings = Settings.builder() + .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 2) + .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 3) + .build(); + + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ShardsLimitAllocationDecider decider = new ShardsLimitAllocationDecider(settings, clusterSettings); + + // Create metadata for two indices + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(0)) + .put(IndexMetadata.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1)) + .build(); + + // Create routing table + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + + // Setup routing for test1 (3 primaries) + IndexRoutingTable.Builder test1RoutingTableBuilder = IndexRoutingTable.builder(metadata.index("test1").getIndex()); + + // test1: First primary 
on node1 + test1RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test1", 0, "node1", null, true, ShardRoutingState.STARTED)); + + // test1: Second primary on node2 + test1RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test1", 1, "node2", null, true, ShardRoutingState.STARTED)); + + // test1: Third primary unassigned + test1RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test1", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + + // Setup routing for test2 (2 primaries, 1 replica) + IndexRoutingTable.Builder test2RoutingTableBuilder = IndexRoutingTable.builder(metadata.index("test2").getIndex()); + + // test2: First primary on node1 + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 0, "node1", null, true, ShardRoutingState.STARTED)); + + // test2: Second primary on node2 + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 1, "node2", null, true, ShardRoutingState.STARTED)); + + // test2: First replica on node2 + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 0, "node2", null, false, ShardRoutingState.STARTED)); + // test2: Second replica unassigned + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 1, null, null, false, ShardRoutingState.UNASSIGNED)); + + routingTableBuilder.add(test1RoutingTableBuilder.build()); + routingTableBuilder.add(test2RoutingTableBuilder.build()); + RoutingTable routingTable = routingTableBuilder.build(); + + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + + RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + allocation.debugDecision(true); + + // Get shards for testing + ShardRouting test1Shard1 = routingTable.index("test1").shard(0).primaryShard(); + ShardRouting test1Shard3 = routingTable.index("test1").shard(2).primaryShard(); + ShardRouting test2Replica2 = routingTable.index("test2").shard(1).replicaShards().get(0); + + // Test allocation decisions + // Cannot allocate third primary to node1 (would exceed primary shard limit) + assertEquals(NO, decider.canAllocate(test1Shard3, clusterState.getRoutingNodes().node("node1"), allocation).type()); + + // Cannot allocate third primary to node2 (would exceed primary shard limit) + assertEquals(NO, decider.canAllocate(test1Shard3, clusterState.getRoutingNodes().node("node2"), allocation).type()); + + // Can allocate second replica to node1 (within total shard limit) + assertEquals(YES, decider.canAllocate(test2Replica2, clusterState.getRoutingNodes().node("node1"), allocation).type()); + + // Cannot allocate second replica to node2 (would exceed total shard limit) + assertEquals(NO, decider.canAllocate(test2Replica2, clusterState.getRoutingNodes().node("node2"), allocation).type()); + + // Existing primary can remain + assertEquals(YES, decider.canRemain(test1Shard1, clusterState.getRoutingNodes().node("node1"), allocation).type()); + + } + + public void testIndexShardLimit() { + Settings clusterSettings = Settings.builder() + .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 2) + .build(); + ClusterSettings clusterSettingsObject = new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + 
ShardsLimitAllocationDecider decider = new ShardsLimitAllocationDecider(clusterSettings, clusterSettingsObject); + + // Create index settings with INDEX_TOTAL_SHARDS_PER_NODE_SETTING and version + Settings indexSettings = Settings.builder() + .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 1) // Set index-level limit to 1 + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test1").settings(indexSettings).numberOfShards(3).numberOfReplicas(0)) + .put(IndexMetadata.builder("test2").settings(indexSettings).numberOfShards(3).numberOfReplicas(0)) + .build(); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + + // Set up routing table for test1 + IndexRoutingTable.Builder test1RoutingTableBuilder = IndexRoutingTable.builder(metadata.index("test1").getIndex()); + test1RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test1", 0, "node1", null, true, ShardRoutingState.STARTED)); + test1RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test1", 1, null, null, true, ShardRoutingState.UNASSIGNED)); + test1RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test1", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + routingTableBuilder.add(test1RoutingTableBuilder.build()); + + // Set up routing table for test2 + IndexRoutingTable.Builder test2RoutingTableBuilder = IndexRoutingTable.builder(metadata.index("test2").getIndex()); + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 0, "node2", null, true, ShardRoutingState.STARTED)); + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 1, null, null, true, ShardRoutingState.UNASSIGNED)); + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + routingTableBuilder.add(test2RoutingTableBuilder.build()); + + RoutingTable routingTable = routingTableBuilder.build(); + + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + + RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + allocation.debugDecision(true); + + // Test allocation decisions + ShardRouting test1Shard1 = routingTable.index("test1").shard(1).primaryShard(); + ShardRouting test1Shard2 = routingTable.index("test1").shard(2).primaryShard(); + ShardRouting test2Shard1 = routingTable.index("test2").shard(1).primaryShard(); + ShardRouting test2Shard2 = routingTable.index("test2").shard(2).primaryShard(); + + assertEquals(NO, decider.canAllocate(test1Shard2, clusterState.getRoutingNodes().node("node1"), allocation).type()); + assertEquals(YES, decider.canRemain(test1Shard1, clusterState.getRoutingNodes().node("node1"), allocation).type()); + assertEquals(YES, decider.canAllocate(test1Shard2, clusterState.getRoutingNodes().node("node2"), allocation).type()); + assertEquals(NO, decider.canAllocate(test2Shard2, clusterState.getRoutingNodes().node("node2"), allocation).type()); + assertEquals(YES, decider.canRemain(test2Shard1, clusterState.getRoutingNodes().node("node2"), allocation).type()); + assertEquals(YES, decider.canAllocate(test2Shard2, clusterState.getRoutingNodes().node("node1"), 
allocation).type()); + } + + public void testIndexPrimaryShardLimit() { + Settings clusterSettings = Settings.builder() + .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), -1) + .build(); + ClusterSettings clusterSettingsObject = new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ShardsLimitAllocationDecider decider = new ShardsLimitAllocationDecider(clusterSettings, clusterSettingsObject); + + // Create index settings for three indices + Settings indexSettingsTest1 = Settings.builder() + .put(ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT.toString()) + .build(); + + Settings indexSettingsTest2 = Settings.builder() + .put(ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 2) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + + Settings indexSettingsTest3 = Settings.builder() + .put(ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test1").settings(indexSettingsTest1).numberOfShards(3).numberOfReplicas(0)) + .put(IndexMetadata.builder("test2").settings(indexSettingsTest2).numberOfShards(3).numberOfReplicas(0)) + .put(IndexMetadata.builder("test3").settings(indexSettingsTest3).numberOfShards(3).numberOfReplicas(0)) + .build(); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + + // Set up routing table for test1 + IndexRoutingTable.Builder test1Builder = IndexRoutingTable.builder(metadata.index("test1").getIndex()); + test1Builder.addShard(TestShardRouting.newShardRouting("test1", 0, "node1", null, true, ShardRoutingState.STARTED)); + test1Builder.addShard(TestShardRouting.newShardRouting("test1", 1, "node2", null, true, ShardRoutingState.STARTED)); + test1Builder.addShard(TestShardRouting.newShardRouting("test1", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + routingTableBuilder.add(test1Builder.build()); + + // Set up routing table for test2 + IndexRoutingTable.Builder test2Builder = IndexRoutingTable.builder(metadata.index("test2").getIndex()); + test2Builder.addShard(TestShardRouting.newShardRouting("test2", 0, "node1", null, true, ShardRoutingState.STARTED)); + test2Builder.addShard(TestShardRouting.newShardRouting("test2", 1, "node2", null, true, ShardRoutingState.STARTED)); + test2Builder.addShard(TestShardRouting.newShardRouting("test2", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + routingTableBuilder.add(test2Builder.build()); + + // Set up routing table for test3 + IndexRoutingTable.Builder test3Builder = IndexRoutingTable.builder(metadata.index("test3").getIndex()); + test3Builder.addShard(TestShardRouting.newShardRouting("test3", 0, "node1", null, true, ShardRoutingState.STARTED)); + test3Builder.addShard(TestShardRouting.newShardRouting("test3", 1, "node2", null, true, ShardRoutingState.STARTED)); + test3Builder.addShard(TestShardRouting.newShardRouting("test3", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + routingTableBuilder.add(test3Builder.build()); + + RoutingTable routingTable = routingTableBuilder.build(); + + ClusterState clusterState = 
ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + + RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + allocation.debugDecision(true); + + // Get unassigned shards for testing + ShardRouting test1Shard2 = routingTable.index("test1").shard(2).primaryShard(); + ShardRouting test2Shard2 = routingTable.index("test2").shard(2).primaryShard(); + ShardRouting test3Shard2 = routingTable.index("test3").shard(2).primaryShard(); + + // Test assertions + assertEquals(NO, decider.canAllocate(test1Shard2, clusterState.getRoutingNodes().node("node1"), allocation).type()); // Cannot + // assign 3rd + // shard of + // test1 to + // node1 + assertEquals(NO, decider.canAllocate(test3Shard2, clusterState.getRoutingNodes().node("node2"), allocation).type()); // Cannot + // assign 3rd + // shard of + // test3 to + // node2 + assertEquals(YES, decider.canAllocate(test2Shard2, clusterState.getRoutingNodes().node("node1"), allocation).type()); // Can assign + // 3rd shard + // of test2 to + // node1 + assertEquals(YES, decider.canAllocate(test2Shard2, clusterState.getRoutingNodes().node("node2"), allocation).type()); // Can assign + // 3rd shard + // of test2 to + // node2 + } + + private DiscoveryNode newNode(String nodeId) { + return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), Version.CURRENT); + } +} From a728cae0f4771fa19c60ee9d5794595d32b99bce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Feb 2025 22:43:15 -0500 Subject: [PATCH 20/48] Bump org.awaitility:awaitility from 4.2.2 to 4.3.0 in /server (#17439) * Bump org.awaitility:awaitility from 4.2.2 to 4.3.0 in /server Bumps [org.awaitility:awaitility](https://github.com/awaitility/awaitility) from 4.2.2 to 4.3.0. - [Changelog](https://github.com/awaitility/awaitility/blob/master/changelog.txt) - [Commits](https://github.com/awaitility/awaitility/compare/awaitility-4.2.2...awaitility-4.3.0) --- updated-dependencies: - dependency-name: org.awaitility:awaitility dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 2 +- server/build.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 428cb6a8073d6..70f8a48d19ba3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Rule Based Auto-tagging] Add in-memory attribute value store ([#17342](https://github.com/opensearch-project/OpenSearch/pull/17342)) ### Dependencies -- Bump `org.awaitility:awaitility` from 4.2.0 to 4.2.2 ([#17230](https://github.com/opensearch-project/OpenSearch/pull/17230)) +- Bump `org.awaitility:awaitility` from 4.2.0 to 4.3.0 ([#17230](https://github.com/opensearch-project/OpenSearch/pull/17230), [#17439](https://github.com/opensearch-project/OpenSearch/pull/17439)) - Bump `dnsjava:dnsjava` from 3.6.2 to 3.6.3 ([#17231](https://github.com/opensearch-project/OpenSearch/pull/17231)) - Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17229](https://github.com/opensearch-project/OpenSearch/pull/17229)) - Bump `org.jruby.joni:joni` from 2.2.1 to 2.2.3 ([#17136](https://github.com/opensearch-project/OpenSearch/pull/17136)) diff --git a/server/build.gradle b/server/build.gradle index cb64d6becb315..fd2cac4c7506f 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -114,7 +114,7 @@ dependencies { // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap api libs.roaringbitmap - testImplementation 'org.awaitility:awaitility:4.2.2' + testImplementation 'org.awaitility:awaitility:4.3.0' testImplementation(project(":test:framework")) { // tests use the locally compiled version of server exclude group: 'org.opensearch', module: 'server' From db43d0fd5d2bf044c79b17f928673ef6f7db7479 Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Tue, 25 Feb 2025 00:18:19 -0500 Subject: [PATCH 21/48] Updated affiliation for @dblock. (#17450) Signed-off-by: Daniel (dB.) Doubrovkine --- MAINTAINERS.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 8a6890d1ca1c1..887ff654dff96 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -5,7 +5,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje ## Current Maintainers | Maintainer | GitHub ID | Affiliation | -|--------------------------|---------------------------------------------------------|-------------| +| ------------------------ | ------------------------------------------------------- | ----------- | | Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | | Andrew Ross | [andrross](https://github.com/andrross) | Amazon | | Andriy Redko | [reta](https://github.com/reta) | Independent | @@ -15,7 +15,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon | | Craig Perkins | [cwperks](https://github.com/cwperks) | Amazon | | Dan Widdis | [dbwiddis](https://github.com/dbwiddis) | Amazon | -| Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock) | Amazon | +| Daniel "dB." 
Doubrovkine | [dblock](https://github.com/dblock) | Independent | | Binlong Gao | [gaobinlong](https://github.com/gaobinlong) | Amazon | | Gaurav Bafna | [gbbafna](https://github.com/gbbafna) | Amazon | | Jay Deng | [jed326](https://github.com/jed326) | Amazon | @@ -35,14 +35,14 @@ This document contains a list of maintainers in this repo. See [opensearch-proje ## Emeritus -| Maintainer | GitHub ID | Affiliation | -| ---------------------- |-------------------------------------------- | ----------- | -| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | -| Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | -| Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon | -| Abbas Hussain | [abbashus](https://github.com/abbashus) | Meta | -| Himanshu Setia | [setiah](https://github.com/setiah) | Amazon | -| Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | -| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Independent | -| Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | -| Suraj Singh | [dreamer-89](https://github.com/dreamer-89) | Amazon | +| Maintainer | GitHub ID | Affiliation | +| --------------------- | ------------------------------------------- | ----------- | +| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | +| Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | +| Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon | +| Abbas Hussain | [abbashus](https://github.com/abbashus) | Meta | +| Himanshu Setia | [setiah](https://github.com/setiah) | Amazon | +| Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | +| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Independent | +| Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | +| Suraj Singh | [dreamer-89](https://github.com/dreamer-89) | Amazon | From 7e2d2437a14580e985440c140f06680dc4e3cd81 Mon Sep 17 00:00:00 2001 From: panguixin Date: Wed, 26 Feb 2025 00:00:59 +0800 Subject: [PATCH 22/48] Correct the isStored flag for wildcard field type (#17440) Signed-off-by: panguixin --- .../java/org/opensearch/index/mapper/WildcardFieldMapper.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java index 20c5ce87ad1c7..1132c245c6930 100644 --- a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java @@ -315,7 +315,7 @@ public WildcardFieldType(String name, Map<String, String> meta) { } public WildcardFieldType(String name, NamedAnalyzer normalizer, Builder builder) { - super(name, true, true, builder.hasDocValues.getValue(), TextSearchInfo.SIMPLE_MATCH_ONLY, builder.meta.getValue()); + super(name, true, false, builder.hasDocValues.getValue(), TextSearchInfo.SIMPLE_MATCH_ONLY, builder.meta.getValue()); setIndexAnalyzer(normalizer); this.ignoreAbove = builder.ignoreAbove.getValue(); this.nullValue = builder.nullValue.getValue();
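Editor's note (added in review, not part of the original patch): the flipped boolean above is the isStored argument. In the MappedFieldType super-constructor the parameters after the field name are, in order, isSearchable, isStored, and hasDocValues, so an annotated form of the changed call would read (argument names are assumptions drawn from that signature):

    super(name, /* isSearchable */ true, /* isStored */ false, builder.hasDocValues.getValue(),
        TextSearchInfo.SIMPLE_MATCH_ONLY, builder.meta.getValue());

Per the commit title the previous true was simply incorrect, presumably because the wildcard mapper indexes n-grams and optional doc values rather than a stored copy of the original value, so the field type should not report itself as stored.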
From e39790357453eca32da707106c22cbd1c8ec39d5 Mon Sep 17 00:00:00 2001 From: Lakshya Taragi <157457166+ltaragi@users.noreply.github.com> Date: Wed, 26 Feb 2025 05:00:27 +0530 Subject: [PATCH 23/48] Fix flaky tests from`SegmentReplicationAllocationIT` (#17429) * Fix flaky tests in SegmentReplicationAllocationIT Signed-off-by: Lakshya Taragi * Remove extra logs Signed-off-by: Lakshya Taragi * Account for replicas as well Signed-off-by: Lakshya Taragi * Reduce upper limit on no. of indices Signed-off-by: Lakshya Taragi * Only verified changes Signed-off-by: Lakshya Taragi * Fix testSingleIndexShardAllocation Signed-off-by: Lakshya Taragi --------- Signed-off-by: Lakshya Taragi --- .../SegmentReplicationAllocationIT.java | 29 +++++++++++++------ 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index 669e24f9fb555..0b2cf93903ed9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -25,6 +25,7 @@ import org.opensearch.test.junit.annotations.TestLogging; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -169,14 +170,16 @@ public void testSingleIndexShardAllocation() throws Exception { // Remove a node internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeNames.get(0))); - ensureGreen(TimeValue.timeValueSeconds(60)); + internalCluster().validateClusterFormed(); + ensureGreen(TimeValue.timeValueSeconds(100)); state = client().admin().cluster().prepareState().execute().actionGet().getState(); logger.info(ShardAllocations.printShardDistribution(state)); verifyPerIndexPrimaryBalance(); // Add a new node internalCluster().startDataOnlyNode(); - ensureGreen(TimeValue.timeValueSeconds(60)); + internalCluster().validateClusterFormed(); + ensureGreen(TimeValue.timeValueSeconds(100)); state = client().admin().cluster().prepareState().execute().actionGet().getState(); logger.info(ShardAllocations.printShardDistribution(state)); verifyPerIndexPrimaryBalance(); @@ -250,12 +253,21 @@ public void testAllocationAndRebalanceWithDisruption() throws Exception { internalCluster().startClusterManagerOnlyNode(); final int maxReplicaCount = 2; final int maxShardCount = 2; - // Create higher number of nodes than number of shards to reduce chances of SameShardAllocationDecider kicking-in + final int numberOfIndices = randomIntBetween(1, 3); + final int maxPossibleShards = numberOfIndices * maxShardCount * (1 + maxReplicaCount); + + List<List<Integer>> shardAndReplicaCounts = new ArrayList<>(); + int shardCount, replicaCount, totalShards = 0; + for (int i = 0; i < numberOfIndices; i++) { + shardCount = randomIntBetween(1, maxShardCount); + replicaCount = randomIntBetween(1, maxReplicaCount); + shardAndReplicaCounts.add(Arrays.asList(shardCount, replicaCount)); + totalShards += shardCount * (1 + replicaCount); + } + // Create a strictly higher number of nodes than the number of shards to reduce chances of SameShardAllocationDecider kicking-in // and preventing primary relocations - final int nodeCount = randomIntBetween(5, 10); - final int numberOfIndices = randomIntBetween(1, 10); + final int nodeCount = randomIntBetween(totalShards, maxPossibleShards) + 1; final float buffer = randomIntBetween(1, 4) * 0.10f; - logger.info("--> Creating {} nodes", nodeCount); final List<String> nodeNames = new ArrayList<>(); for (int i = 0; i < nodeCount; i++) { } setAllocationRelocationStrategy(true, true, buffer); - int shardCount, replicaCount; ClusterState state; for (int i = 0; i < numberOfIndices; i++) { - shardCount = randomIntBetween(1, maxShardCount); - replicaCount = randomIntBetween(1, maxReplicaCount); + shardCount = shardAndReplicaCounts.get(i).get(0); + replicaCount = shardAndReplicaCounts.get(i).get(1); logger.info("--> Creating index test{} with primary {} and replica {}", i, shardCount, replicaCount); createIndex("test" + i, shardCount, replicaCount, i % 2 == 0); ensureGreen(TimeValue.timeValueSeconds(60)); From 171433c2ad253aa42b6773e290ac26f78fb02917 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Wed, 26 Feb 2025 18:49:21 +0530 Subject: [PATCH 24/48] Fix ConcurrentModificationException in RemoteFsTimestampAwareTranslog.trimUnreferencedReaders (#17028) * Fix ConcurrentModificationException in RemoteFsTimestampAwareTranslog.trimUnreferencedReaders Signed-off-by: Sachin Kale * Address PR comments Signed-off-by: Sachin Kale --------- Signed-off-by: Sachin Kale --- .../RemoteFsTimestampAwareTranslog.java | 20 +++++++++---------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java index 99153324b8372..427dbb690448f 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java @@ -125,20 +125,18 @@ protected void trimUnreferencedReaders(boolean indexDeleted, boolean trimLocal) } // Update file tracker to reflect local translog state - Optional<Long> minLiveGeneration = readers.stream().map(BaseTranslogReader::getGeneration).min(Long::compareTo); - if (minLiveGeneration.isPresent()) { - List<String> staleFilesInTracker = new ArrayList<>(); - for (String file : fileTransferTracker.allUploaded()) { - if (file.endsWith(TRANSLOG_FILE_SUFFIX)) { - long generation = Translog.parseIdFromFileName(file); - if (generation < minLiveGeneration.get()) { - staleFilesInTracker.add(file); - staleFilesInTracker.add(Translog.getCommitCheckpointFileName(generation)); - } + long minLiveGeneration = getMinFileGeneration(); + List<String> staleFilesInTracker = new ArrayList<>(); + for (String file : fileTransferTracker.allUploaded()) { + if (file.endsWith(TRANSLOG_FILE_SUFFIX)) { + long generation = Translog.parseIdFromFileName(file); + if (generation < minLiveGeneration) { + staleFilesInTracker.add(file); + staleFilesInTracker.add(Translog.getCommitCheckpointFileName(generation)); } - fileTransferTracker.delete(staleFilesInTracker); } } + fileTransferTracker.delete(staleFilesInTracker); // This is to ensure that after the permits are acquired during primary relocation, there are no further modification on remote // store.
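Editor's note (added in review, not part of the original patch): the hunk above cures the ConcurrentModificationException by collecting stale files first and issuing a single fileTransferTracker.delete(...) after the loop over fileTransferTracker.allUploaded() has finished; the removed code called delete(...) from inside that loop, which fails if delete(...) mutates the same collection the active iterator is walking. A self-contained reproduction of the underlying failure mode (file names illustrative only):

    import java.util.ArrayList;
    import java.util.List;

    public class CmeDemo {
        public static void main(String[] args) {
            List<String> uploaded = new ArrayList<>(List.of("translog-1.tlog", "translog-2.tlog", "translog-3.tlog"));
            for (String file : uploaded) {
                // structural modification while the for-each iterator is live:
                // the next call to Iterator.next() throws ConcurrentModificationException
                uploaded.remove(file);
            }
        }
    }

Deferring the mutation until after iteration, as the patch does, is the standard fix; snapshotting allUploaded() into a copy before the loop would be an alternative.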
From 176a4f01dea48a9a5f149ba50c3d51fe6f11e9d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 26 Feb 2025 11:24:30 -0500 Subject: [PATCH 25/48] Bump com.netflix.nebula.ospackage-base from 11.10.1 to 11.11.1 in /distribution/packages (#17374) * Bump com.netflix.nebula.ospackage-base in /distribution/packages Bumps com.netflix.nebula.ospackage-base from 11.10.1 to 11.11.1. --- updated-dependencies: - dependency-name: com.netflix.nebula.ospackage-base dependency-type: direct:production update-type: version-update:semver-minor ...
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Craig Perkins Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Craig Perkins --- CHANGELOG.md | 1 + distribution/packages/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 70f8a48d19ba3..e4779231977b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `me.champeau.gradle.japicmp` from 0.4.5 to 0.4.6 ([#17375](https://github.com/opensearch-project/OpenSearch/pull/17375)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.37.1 to 2.52.0 ([#17379](https://github.com/opensearch-project/OpenSearch/pull/17379)) - Bump `net.minidev:json-smart` from 2.5.1 to 2.5.2 ([#17378](https://github.com/opensearch-project/OpenSearch/pull/17378)) +- Bump `com.netflix.nebula.ospackage-base` from 11.10.1 to 11.11.1 ([#17374](https://github.com/opensearch-project/OpenSearch/pull/17374)) ### Changed - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index ada19dfa38e78..d3cecde24a35d 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.10.1" + id "com.netflix.nebula.ospackage-base" version "11.11.1" } void addProcessFilesTask(String type, boolean jdk) { From 0ffed5e8b743a075f5d66c4fe6e9b8371eacfa14 Mon Sep 17 00:00:00 2001 From: Iwan Igonin <83668556+beanuwave@users.noreply.github.com> Date: Wed, 26 Feb 2025 20:43:38 +0100 Subject: [PATCH 26/48] =?UTF-8?q?Use=20BC=20libraries=20to=20parse=20PEM?= =?UTF-8?q?=20files,=20increase=20key=20length,=20allow=20gener=E2=80=A6?= =?UTF-8?q?=20(#17393)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Use BC libraries to parse PEM files, increase key length, allow general use of known cryptographic binary extensions, remove unused BC dependencies Signed-off-by: Igonin * remove duplicated test permission Signed-off-by: Igonin --------- Signed-off-by: Igonin Co-authored-by: Igonin --- CHANGELOG-3.0.md | 1 + buildSrc/build.gradle | 4 - .../precommit/ForbiddenPatternsTask.java | 5 + .../gradle/testclusters/OpenSearchNode.java | 11 +- client/rest/build.gradle | 4 - distribution/tools/plugin-cli/build.gradle | 4 +- libs/ssl-config/build.gradle | 11 +- .../licenses/bcpkix-jdk18on-1.78.jar.sha1 | 0 .../licenses/bcprov-jdk18on-1.78.jar.sha1 | 0 .../licenses/bcutil-jdk18on-1.78.jar.sha1 | 1 + .../licenses/bouncycastle-LICENSE.txt | 14 + .../licenses/bouncycastle-NOTICE.txt | 1 + .../opensearch/common/ssl/PemKeyConfig.java | 4 +- .../org/opensearch/common/ssl/PemUtils.java | 658 +++--------------- .../common/ssl/SslConfiguration.java | 8 +- .../common/ssl/SslConfigurationLoader.java | 9 +- .../common/ssl/PemKeyConfigTests.java | 15 +- .../common/ssl/PemTrustConfigTests.java | 11 +- .../opensearch/common/ssl/PemUtilsTests.java | 95 ++- .../ssl/SslConfigurationLoaderTests.java | 5 +- .../common/ssl/SslDiagnosticsTests.java | 24 +- .../common/ssl/StoreKeyConfigTests.java | 37 +- .../common/ssl/StoreTrustConfigTests.java | 31 +- .../src/test/resources/certs/README.md | 155 
+++++ .../src/test/resources/certs/README.txt | 85 --- .../test/resources/certs/cert-all/certs.p12 | Bin 4757 -> 4895 bytes .../test/resources/certs/cert-all/empty.jks | Bin 0 -> 32 bytes .../resources/certs/cert1/cert1-pkcs1.crt | 19 + .../resources/certs/cert1/cert1-pkcs1.key | 27 + .../resources/certs/cert1/cert1-pkcs8.key | 28 - .../src/test/resources/certs/cert1/cert1.crt | 34 +- .../src/test/resources/certs/cert1/cert1.key | 55 +- .../src/test/resources/certs/cert1/cert1.p12 | Bin 2456 -> 2606 bytes .../resources/certs/cert2/cert2-pkcs1.crt | 19 + .../resources/certs/cert2/cert2-pkcs1.key | 30 + .../resources/certs/cert2/cert2-pkcs8.key | 29 - .../src/test/resources/certs/cert2/cert2.crt | 34 +- .../src/test/resources/certs/cert2/cert2.key | 60 +- .../src/test/resources/certs/cert2/cert2.p12 | Bin 2456 -> 2606 bytes .../test/resources/certs/pem-utils/README.md | 108 ++- .../pem-utils/dsa_key_openssl_encrypted.pem | 30 +- .../certs/pem-utils/dsa_key_openssl_plain.pem | 28 +- .../dsa_key_openssl_plain_with_params.pem | 28 +- .../pem-utils/dsa_key_pkcs8_encrypted.pem | 18 + .../certs/pem-utils/dsa_key_pkcs8_plain.pem | 20 +- .../pem-utils/ec_key_openssl_encrypted.pem | 7 +- .../certs/pem-utils/ec_key_openssl_plain.pem | 5 +- .../ec_key_openssl_plain_with_params.pem | 7 +- .../pem-utils/ec_key_pkcs8_encrypted.pem | 6 + .../certs/pem-utils/key_DSA_enc_pbkdf2.pem | 18 + .../certs/pem-utils/key_EC_enc_pbkdf2.pem | 6 + .../certs/pem-utils/key_PKCS8_enc_pbkdf2.pem | 30 + .../resources/certs/pem-utils/testnode.jks | Bin 9360 -> 15253 bytes modules/reindex/build.gradle | 5 - .../reindex/ReindexRestClientSslTests.java | 10 +- .../org/opensearch/index/reindex/README.md | 48 ++ .../org/opensearch/index/reindex/README.txt | 16 - .../org/opensearch/index/reindex/ca.key | 30 + .../org/opensearch/index/reindex/ca.pem | 43 +- .../index/reindex/client/client.crt | 35 +- .../index/reindex/client/client.key | 60 +- .../opensearch/index/reindex/http/http.crt | 38 +- .../opensearch/index/reindex/http/http.key | 60 +- .../SecureNetty4HttpServerTransportTests.java | 2 +- .../ssl/SimpleSecureNetty4TransportTests.java | 2 +- .../src/test/resources/README.md | 26 + .../src/test/resources/README.txt | 17 - .../src/test/resources/netty4-secure.jks | Bin 2790 -> 2790 bytes .../src/test/resources/netty4-secure.p12 | Bin 0 -> 2790 bytes .../AzureDiscoveryClusterFormationTests.java | 4 +- plugins/ingest-attachment/build.gradle | 3 - .../licenses/bcmail-jdk18on-1.78.jar.sha1 | 1 - .../licenses/bcmail-jdk18on-LICENSE.txt | 23 - .../licenses/bcmail-jdk18on-NOTICE.txt | 0 .../licenses/bcpkix-jdk18on-LICENSE.txt | 23 - .../licenses/bcpkix-jdk18on-NOTICE.txt | 0 .../licenses/bcprov-jdk18on-LICENSE.txt | 22 - .../licenses/bcprov-jdk18on-NOTICE.txt | 0 plugins/repository-gcs/build.gradle | 2 +- .../gcs/GoogleCloudStorageServiceTests.java | 2 +- .../repositories/gcs/TestUtils.java | 2 +- ...ReactorNetty4HttpServerTransportTests.java | 16 +- .../src/test/resources/README.txt | 14 - .../src/test/resources/certificate.crt | 22 - .../src/test/resources/certificate.key | 28 - qa/evil-tests/build.gradle | 4 +- .../org/opensearch/bootstrap/test.policy | 21 +- .../resources/provision/kdc.conf.template | 7 +- .../resources/provision/krb5.conf.template | 13 +- test/framework/build.gradle | 3 + .../licenses/bcpkix-jdk18on-1.78.jar.sha1 | 1 + .../licenses/bcprov-jdk18on-1.78.jar.sha1 | 1 + .../licenses/bouncycastle-LICENSE.txt | 14 + .../licenses/bouncycastle-NOTICE.txt | 1 + .../org/opensearch/test/KeyStoreUtils.java | 68 ++ 95 files 
changed, 1214 insertions(+), 1312 deletions(-) rename {plugins/ingest-attachment => libs/ssl-config}/licenses/bcpkix-jdk18on-1.78.jar.sha1 (100%) rename {plugins/ingest-attachment => libs/ssl-config}/licenses/bcprov-jdk18on-1.78.jar.sha1 (100%) create mode 100644 libs/ssl-config/licenses/bcutil-jdk18on-1.78.jar.sha1 create mode 100644 libs/ssl-config/licenses/bouncycastle-LICENSE.txt create mode 100644 libs/ssl-config/licenses/bouncycastle-NOTICE.txt create mode 100644 libs/ssl-config/src/test/resources/certs/README.md delete mode 100644 libs/ssl-config/src/test/resources/certs/README.txt create mode 100644 libs/ssl-config/src/test/resources/certs/cert-all/empty.jks create mode 100644 libs/ssl-config/src/test/resources/certs/cert1/cert1-pkcs1.crt create mode 100644 libs/ssl-config/src/test/resources/certs/cert1/cert1-pkcs1.key delete mode 100644 libs/ssl-config/src/test/resources/certs/cert1/cert1-pkcs8.key create mode 100644 libs/ssl-config/src/test/resources/certs/cert2/cert2-pkcs1.crt create mode 100644 libs/ssl-config/src/test/resources/certs/cert2/cert2-pkcs1.key delete mode 100644 libs/ssl-config/src/test/resources/certs/cert2/cert2-pkcs8.key create mode 100644 libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_encrypted.pem create mode 100644 libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_pkcs8_encrypted.pem create mode 100644 libs/ssl-config/src/test/resources/certs/pem-utils/key_DSA_enc_pbkdf2.pem create mode 100644 libs/ssl-config/src/test/resources/certs/pem-utils/key_EC_enc_pbkdf2.pem create mode 100644 libs/ssl-config/src/test/resources/certs/pem-utils/key_PKCS8_enc_pbkdf2.pem create mode 100644 modules/reindex/src/test/resources/org/opensearch/index/reindex/README.md delete mode 100644 modules/reindex/src/test/resources/org/opensearch/index/reindex/README.txt create mode 100644 modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.key create mode 100644 modules/transport-netty4/src/test/resources/README.md delete mode 100644 modules/transport-netty4/src/test/resources/README.txt create mode 100644 modules/transport-netty4/src/test/resources/netty4-secure.p12 delete mode 100644 plugins/ingest-attachment/licenses/bcmail-jdk18on-1.78.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/bcmail-jdk18on-LICENSE.txt delete mode 100644 plugins/ingest-attachment/licenses/bcmail-jdk18on-NOTICE.txt delete mode 100644 plugins/ingest-attachment/licenses/bcpkix-jdk18on-LICENSE.txt delete mode 100644 plugins/ingest-attachment/licenses/bcpkix-jdk18on-NOTICE.txt delete mode 100644 plugins/ingest-attachment/licenses/bcprov-jdk18on-LICENSE.txt delete mode 100644 plugins/ingest-attachment/licenses/bcprov-jdk18on-NOTICE.txt delete mode 100644 plugins/transport-reactor-netty4/src/test/resources/README.txt delete mode 100644 plugins/transport-reactor-netty4/src/test/resources/certificate.crt delete mode 100644 plugins/transport-reactor-netty4/src/test/resources/certificate.key create mode 100644 test/framework/licenses/bcpkix-jdk18on-1.78.jar.sha1 create mode 100644 test/framework/licenses/bcprov-jdk18on-1.78.jar.sha1 create mode 100644 test/framework/licenses/bouncycastle-LICENSE.txt create mode 100644 test/framework/licenses/bouncycastle-NOTICE.txt create mode 100644 test/framework/src/main/java/org/opensearch/test/KeyStoreUtils.java diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index e4ae38e8da2ae..4c366d0c7714f 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -44,6 +44,7 @@ The format is based on [Keep a 
Changelog](https://keepachangelog.com/en/1.0.0/), - Refactor the `:server` module `org.opensearch.client` to `org.opensearch.transport.client` to eliminate top level split packages for JPMS support ([#17272](https://github.com/opensearch-project/OpenSearch/pull/17272)) - Use Lucene `BM25Similarity` as default since the `LegacyBM25Similarity` is marked as deprecated ([#17306](https://github.com/opensearch-project/OpenSearch/pull/17306)) - Wildcard field index only 3gram of the input data [#17349](https://github.com/opensearch-project/OpenSearch/pull/17349) +- Use BC libraries to parse PEM files, increase key length, allow general use of known cryptographic binary extensions, remove unused BC dependencies ([#3420](https://github.com/opensearch-project/OpenSearch/pull/14912)) ### Deprecated diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 6e30bb0199086..65986f2361c9d 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -229,12 +229,8 @@ if (project != rootProject) { forbiddenPatterns { exclude '**/*.wav' - exclude '**/*.p12' - exclude '**/*.jks' - exclude '**/*.crt' // the file that actually defines nocommit exclude '**/ForbiddenPatternsTask.java' - exclude '**/*.bcfks' } testingConventions { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java index 1790b32fb2f36..fbf96483443ee 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java @@ -83,8 +83,13 @@ public class ForbiddenPatternsTask extends DefaultTask { .exclude("**/*.ico") .exclude("**/*.jar") .exclude("**/*.zip") + .exclude("**/*.p12") .exclude("**/*.jks") .exclude("**/*.crt") + .exclude("**/*.der") + .exclude("**/*.pem") + .exclude("**/*.key") + .exclude("**/*.bcfks") .exclude("**/*.keystore") .exclude("**/*.png"); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java index aaa2daef2a158..c7af3d0a155f7 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java @@ -548,7 +548,7 @@ public synchronized void start() { logToProcessStdout("Creating opensearch keystore with password set to [" + keystorePassword + "]"); if (keystorePassword.length() > 0) { - runOpenSearchBinScriptWithInput(keystorePassword + "\n" + keystorePassword, "opensearch-keystore", "create", "-p"); + runOpenSearchBinScriptWithInput(keystorePassword + "\n" + keystorePassword + "\n", "opensearch-keystore", "create", "-p"); } else { runOpenSearchBinScript("opensearch-keystore", "-v", "create"); } @@ -556,7 +556,7 @@ public synchronized void start() { if (keystoreSettings.isEmpty() == false || keystoreFiles.isEmpty() == false) { logToProcessStdout("Adding " + keystoreSettings.size() + " keystore settings and " + keystoreFiles.size() + " keystore files"); - keystoreSettings.forEach((key, value) -> runKeystoreCommandWithPassword(keystorePassword, value.toString(), "add", "-x", key)); + keystoreSettings.forEach((key, value) -> runKeystoreCommandWithPassword(keystorePassword, value.toString(), "add", key)); for (Map.Entry<String, File> entry : keystoreFiles.entrySet()) { File file = entry.getValue(); @@ -738,7 +738,12 @@ private void runOpenSearchBinScriptWithInput(String input, String
tool, CharSequ } private void runKeystoreCommandWithPassword(String keystorePassword, String input, CharSequence... args) { - final String actualInput = keystorePassword.length() > 0 ? keystorePassword + "\n" + input : input; + final String actualInput; + if (keystorePassword.length() > 0) { + actualInput = keystorePassword + "\n" + input + "\n" + input; + } else { + actualInput = input + "\n" + input; + } runOpenSearchBinScriptWithInput(actualInput, "opensearch-keystore", args); } diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 93faf0024b51e..29d76e6910ee3 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -75,10 +75,6 @@ tasks.withType(CheckForbiddenApis).configureEach { replaceSignatureFiles('jdk-signatures', 'http-signatures') } -forbiddenPatterns { - exclude '**/*.der' -} - tasks.named('forbiddenApisTest').configure { //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage bundledSignatures -= 'jdk-non-portable' diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 784cdc457a1a9..ecb86ecb1eb0b 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -39,7 +39,9 @@ dependencies { compileOnly project(":libs:opensearch-cli") api "org.bouncycastle:bcpg-fips:2.0.9" api "org.bouncycastle:bc-fips:2.0.0" - testImplementation project(":test:framework") + testImplementation(project(":test:framework")) { + exclude group: 'org.bouncycastle' + } testImplementation 'com.google.jimfs:jimfs:1.3.0' testRuntimeOnly("com.google.guava:guava:${versions.guava}") { transitive = false diff --git a/libs/ssl-config/build.gradle b/libs/ssl-config/build.gradle index 3226ec12ff6f7..da0829cb533da 100644 --- a/libs/ssl-config/build.gradle +++ b/libs/ssl-config/build.gradle @@ -34,6 +34,9 @@ apply plugin: "opensearch.publish" dependencies { api project(':libs:opensearch-common') + api "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcpkix-jdk18on:${versions.bouncycastle}" + runtimeOnly "org.bouncycastle:bcutil-jdk18on:${versions.bouncycastle}" testImplementation(project(":test:framework")) { exclude group: 'org.opensearch', module: 'opensearch-ssl-config' @@ -44,16 +47,12 @@ dependencies { testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" } - tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' } -forbiddenPatterns { - exclude '**/*.key' - exclude '**/*.pem' - exclude '**/*.p12' - exclude '**/*.jks' +tasks.named("dependencyLicenses").configure { + mapping from: /bc.*/, to: 'bouncycastle' } tasks.test { diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.78.jar.sha1 b/libs/ssl-config/licenses/bcpkix-jdk18on-1.78.jar.sha1 similarity index 100% rename from plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.78.jar.sha1 rename to libs/ssl-config/licenses/bcpkix-jdk18on-1.78.jar.sha1 diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.78.jar.sha1 b/libs/ssl-config/licenses/bcprov-jdk18on-1.78.jar.sha1 similarity index 100% rename from plugins/ingest-attachment/licenses/bcprov-jdk18on-1.78.jar.sha1 rename to libs/ssl-config/licenses/bcprov-jdk18on-1.78.jar.sha1 diff --git a/libs/ssl-config/licenses/bcutil-jdk18on-1.78.jar.sha1 b/libs/ssl-config/licenses/bcutil-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..9c88eef3ace17 --- /dev/null +++ b/libs/ssl-config/licenses/bcutil-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ 
+81c1f5e06f206be5dad137d563609dbe66c81d31 \ No newline at end of file diff --git a/libs/ssl-config/licenses/bouncycastle-LICENSE.txt b/libs/ssl-config/licenses/bouncycastle-LICENSE.txt new file mode 100644 index 0000000000000..5c7c14696849d --- /dev/null +++ b/libs/ssl-config/licenses/bouncycastle-LICENSE.txt @@ -0,0 +1,14 @@ +Copyright (c) 2000 - 2023 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/libs/ssl-config/licenses/bouncycastle-NOTICE.txt b/libs/ssl-config/licenses/bouncycastle-NOTICE.txt new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/libs/ssl-config/licenses/bouncycastle-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemKeyConfig.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemKeyConfig.java index bfc29a5801b11..d957ffa457149 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemKeyConfig.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemKeyConfig.java @@ -32,6 +32,8 @@ package org.opensearch.common.ssl; +import org.bouncycastle.pkcs.PKCSException; + import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.X509ExtendedKeyManager; @@ -91,7 +93,7 @@ private PrivateKey getPrivateKey() { throw new SslConfigException("the configured ssl private key file [" + key.toAbsolutePath() + "] does not exist", e); } catch (IOException e) { throw new SslConfigException("the configured ssl private key file [" + key.toAbsolutePath() + "] cannot be read", e); - } catch (GeneralSecurityException e) { + } catch (PKCSException e) { throw new SslConfigException("cannot load ssl private key file [" + key.toAbsolutePath() + "]", e); } } diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java index 8a3730ee554f9..441e17b808feb 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java @@ -32,628 +32,136 @@ package org.opensearch.common.ssl; -import org.opensearch.common.CharArrays; +import org.bouncycastle.asn1.ASN1ObjectIdentifier; +import org.bouncycastle.asn1.pkcs.PrivateKeyInfo; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.openssl.PEMEncryptedKeyPair; +import org.bouncycastle.openssl.PEMKeyPair; +import org.bouncycastle.openssl.PEMParser; +import 
org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter; +import org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder; +import org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo; +import org.bouncycastle.pkcs.PKCSException; +import org.bouncycastle.pkcs.jcajce.JcePKCSPBEInputDecryptorProviderBuilder; -import javax.crypto.Cipher; -import javax.crypto.EncryptedPrivateKeyInfo; -import javax.crypto.SecretKey; -import javax.crypto.SecretKeyFactory; -import javax.crypto.spec.IvParameterSpec; -import javax.crypto.spec.PBEKeySpec; -import javax.crypto.spec.SecretKeySpec; - -import java.io.BufferedReader; -import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; -import java.math.BigInteger; import java.nio.charset.StandardCharsets; import java.nio.file.Files; -import java.nio.file.NoSuchFileException; import java.nio.file.Path; -import java.security.GeneralSecurityException; -import java.security.KeyFactory; -import java.security.KeyPairGenerator; -import java.security.MessageDigest; import java.security.PrivateKey; +import java.security.Provider; import java.security.cert.Certificate; import java.security.cert.CertificateException; import java.security.cert.CertificateFactory; -import java.security.interfaces.ECKey; -import java.security.spec.AlgorithmParameterSpec; -import java.security.spec.DSAPrivateKeySpec; -import java.security.spec.ECGenParameterSpec; -import java.security.spec.ECParameterSpec; -import java.security.spec.ECPrivateKeySpec; -import java.security.spec.PKCS8EncodedKeySpec; -import java.security.spec.RSAPrivateCrtKeySpec; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Base64; import java.util.Collection; -import java.util.HashMap; import java.util.List; -import java.util.Map; +import java.util.Locale; import java.util.function.Supplier; final class PemUtils { - private static final String PKCS1_HEADER = "-----BEGIN RSA PRIVATE KEY-----"; - private static final String PKCS1_FOOTER = "-----END RSA PRIVATE KEY-----"; - private static final String OPENSSL_DSA_HEADER = "-----BEGIN DSA PRIVATE KEY-----"; - private static final String OPENSSL_DSA_FOOTER = "-----END DSA PRIVATE KEY-----"; - private static final String OPENSSL_DSA_PARAMS_HEADER = "-----BEGIN DSA PARAMETERS-----"; - private static final String OPENSSL_DSA_PARAMS_FOOTER = "-----END DSA PARAMETERS-----"; - private static final String PKCS8_HEADER = "-----BEGIN PRIVATE KEY-----"; - private static final String PKCS8_FOOTER = "-----END PRIVATE KEY-----"; - private static final String PKCS8_ENCRYPTED_HEADER = "-----BEGIN ENCRYPTED PRIVATE KEY-----"; - private static final String PKCS8_ENCRYPTED_FOOTER = "-----END ENCRYPTED PRIVATE KEY-----"; - private static final String OPENSSL_EC_HEADER = "-----BEGIN EC PRIVATE KEY-----"; - private static final String OPENSSL_EC_FOOTER = "-----END EC PRIVATE KEY-----"; - private static final String OPENSSL_EC_PARAMS_HEADER = "-----BEGIN EC PARAMETERS-----"; - private static final String OPENSSL_EC_PARAMS_FOOTER = "-----END EC PARAMETERS-----"; - private static final String HEADER = "-----BEGIN"; + private static final Provider BC = new BouncyCastleProvider(); - private PemUtils() { + PemUtils() { throw new IllegalStateException("Utility class should not be instantiated"); } /** * Creates a {@link PrivateKey} from the contents of a file. Supports PKCS#1, PKCS#8 - * encoded formats of encrypted and plaintext RSA, DSA and EC(secp256r1) keys + * encoded formats of encrypted and plaintext RSA, DSA and EC(secp256r1) keys. 
* * @param keyPath the path for the key file - * @param passwordSupplier A password supplier for the potentially encrypted (password protected) key + * @param passwordSupplier A password supplier for the potentially encrypted (password protected) key. Unencrypted keys ignore this value. * @return a private key from the contents of the file */ - public static PrivateKey readPrivateKey(Path keyPath, Supplier passwordSupplier) throws IOException, GeneralSecurityException { - try (BufferedReader bReader = Files.newBufferedReader(keyPath, StandardCharsets.UTF_8)) { - String line = bReader.readLine(); - while (null != line && line.startsWith(HEADER) == false) { - line = bReader.readLine(); - } - if (null == line) { - throw new SslConfigException("Error parsing Private Key [" + keyPath.toAbsolutePath() + "], file is empty"); - } - if (PKCS8_ENCRYPTED_HEADER.equals(line.trim())) { - char[] password = passwordSupplier.get(); - if (password == null) { - throw new SslConfigException("cannot read encrypted key [" + keyPath.toAbsolutePath() + "] without a password"); - } - return parsePKCS8Encrypted(bReader, password); - } else if (PKCS8_HEADER.equals(line.trim())) { - return parsePKCS8(bReader); - } else if (PKCS1_HEADER.equals(line.trim())) { - return parsePKCS1Rsa(bReader, passwordSupplier); - } else if (OPENSSL_DSA_HEADER.equals(line.trim())) { - return parseOpenSslDsa(bReader, passwordSupplier); - } else if (OPENSSL_DSA_PARAMS_HEADER.equals(line.trim())) { - return parseOpenSslDsa(removeDsaHeaders(bReader), passwordSupplier); - } else if (OPENSSL_EC_HEADER.equals(line.trim())) { - return parseOpenSslEC(bReader, passwordSupplier); - } else if (OPENSSL_EC_PARAMS_HEADER.equals(line.trim())) { - return parseOpenSslEC(removeECHeaders(bReader), passwordSupplier); - } else { - throw new SslConfigException( - "error parsing Private Key [" + keyPath.toAbsolutePath() + "], file does not contain a supported key format" - ); - } - } catch (FileNotFoundException | NoSuchFileException e) { - throw new SslConfigException("private key file [" + keyPath.toAbsolutePath() + "] does not exist", e); - } catch (IOException | GeneralSecurityException e) { - throw new SslConfigException("private key file [" + keyPath.toAbsolutePath() + "] cannot be parsed", e); - } - } - - /** - * Removes the EC Headers that OpenSSL adds to EC private keys as the information in them - * is redundant - * - * @throws IOException if the EC Parameter footer is missing - */ - private static BufferedReader removeECHeaders(BufferedReader bReader) throws IOException { - String line = bReader.readLine(); - while (line != null) { - if (OPENSSL_EC_PARAMS_FOOTER.equals(line.trim())) { - break; - } - line = bReader.readLine(); - } - if (null == line || OPENSSL_EC_PARAMS_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, EC Parameters footer is missing"); - } - // Verify that the key starts with the correct header before passing it to parseOpenSslEC - if (OPENSSL_EC_HEADER.equals(bReader.readLine()) == false) { - throw new IOException("Malformed PEM file, EC Key header is missing"); - } - return bReader; - } - - /** - * Removes the DSA Params Headers that OpenSSL adds to DSA private keys as the information in them - * is redundant - * - * @throws IOException if the EC Parameter footer is missing - */ - private static BufferedReader removeDsaHeaders(BufferedReader bReader) throws IOException { - String line = bReader.readLine(); - while (line != null) { - if (OPENSSL_DSA_PARAMS_FOOTER.equals(line.trim())) { - break; - } 
- line = bReader.readLine(); - } - if (null == line || OPENSSL_DSA_PARAMS_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, DSA Parameters footer is missing"); - } - // Verify that the key starts with the correct header before passing it to parseOpenSslDsa - if (OPENSSL_DSA_HEADER.equals(bReader.readLine()) == false) { - throw new IOException("Malformed PEM file, DSA Key header is missing"); - } - return bReader; - } - - /** - * Creates a {@link PrivateKey} from the contents of {@code bReader} that contains an plaintext private key encoded in - * PKCS#8 - * - * @param bReader the {@link BufferedReader} containing the key file contents - * @return {@link PrivateKey} - * @throws IOException if the file can't be read - * @throws GeneralSecurityException if the private key can't be generated from the {@link PKCS8EncodedKeySpec} - */ - private static PrivateKey parsePKCS8(BufferedReader bReader) throws IOException, GeneralSecurityException { - StringBuilder sb = new StringBuilder(); - String line = bReader.readLine(); - while (line != null) { - if (PKCS8_FOOTER.equals(line.trim())) { - break; - } - sb.append(line.trim()); - line = bReader.readLine(); - } - if (null == line || PKCS8_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, PEM footer is invalid or missing"); - } - byte[] keyBytes = Base64.getDecoder().decode(sb.toString()); - String keyAlgo = getKeyAlgorithmIdentifier(keyBytes); - KeyFactory keyFactory = KeyFactory.getInstance(keyAlgo); - return keyFactory.generatePrivate(new PKCS8EncodedKeySpec(keyBytes)); + public static PrivateKey readPrivateKey(Path keyPath, Supplier passwordSupplier) throws IOException, PKCSException { + PrivateKeyInfo pki = loadPrivateKeyFromFile(keyPath, passwordSupplier); + JcaPEMKeyConverter converter = new JcaPEMKeyConverter(); + return converter.getPrivateKey(pki); } - /** - * Creates a {@link PrivateKey} from the contents of {@code bReader} that contains an EC private key encoded in - * OpenSSL traditional format. 
- * - * @param bReader the {@link BufferedReader} containing the key file contents - * @param passwordSupplier A password supplier for the potentially encrypted (password protected) key - * @return {@link PrivateKey} - * @throws IOException if the file can't be read - * @throws GeneralSecurityException if the private key can't be generated from the {@link ECPrivateKeySpec} - */ - private static PrivateKey parseOpenSslEC(BufferedReader bReader, Supplier passwordSupplier) throws IOException, - GeneralSecurityException { - StringBuilder sb = new StringBuilder(); - String line = bReader.readLine(); - Map pemHeaders = new HashMap<>(); - while (line != null) { - if (OPENSSL_EC_FOOTER.equals(line.trim())) { - break; - } - // Parse PEM headers according to https://www.ietf.org/rfc/rfc1421.txt - if (line.contains(":")) { - String[] header = line.split(":"); - pemHeaders.put(header[0].trim(), header[1].trim()); - } else { - sb.append(line.trim()); - } - line = bReader.readLine(); - } - if (null == line || OPENSSL_EC_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, PEM footer is invalid or missing"); - } - byte[] keyBytes = possiblyDecryptPKCS1Key(pemHeaders, sb.toString(), passwordSupplier); - KeyFactory keyFactory = KeyFactory.getInstance("EC"); - ECPrivateKeySpec ecSpec = parseEcDer(keyBytes); - return keyFactory.generatePrivate(ecSpec); - } - - /** - * Creates a {@link PrivateKey} from the contents of {@code bReader} that contains an RSA private key encoded in - * OpenSSL traditional format. - * - * @param bReader the {@link BufferedReader} containing the key file contents - * @param passwordSupplier A password supplier for the potentially encrypted (password protected) key - * @return {@link PrivateKey} - * @throws IOException if the file can't be read - * @throws GeneralSecurityException if the private key can't be generated from the {@link RSAPrivateCrtKeySpec} - */ - private static PrivateKey parsePKCS1Rsa(BufferedReader bReader, Supplier passwordSupplier) throws IOException, - GeneralSecurityException { - StringBuilder sb = new StringBuilder(); - String line = bReader.readLine(); - Map pemHeaders = new HashMap<>(); - - while (line != null) { - if (PKCS1_FOOTER.equals(line.trim())) { - // Unencrypted - break; - } - // Parse PEM headers according to https://www.ietf.org/rfc/rfc1421.txt - if (line.contains(":")) { - String[] header = line.split(":"); - pemHeaders.put(header[0].trim(), header[1].trim()); - } else { - sb.append(line.trim()); + static List readCertificates(Collection certPaths) throws CertificateException, IOException { + CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); + List certificates = new ArrayList<>(certPaths.size()); + for (Path path : certPaths) { + try (InputStream input = Files.newInputStream(path)) { + final Collection parsed = certFactory.generateCertificates(input); + if (parsed.isEmpty()) { + throw new SslConfigException("Failed to parse any certificate from [" + path.toAbsolutePath() + "]"); + } + certificates.addAll(parsed); } - line = bReader.readLine(); } - if (null == line || PKCS1_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, PEM footer is invalid or missing"); - } - byte[] keyBytes = possiblyDecryptPKCS1Key(pemHeaders, sb.toString(), passwordSupplier); - RSAPrivateCrtKeySpec spec = parseRsaDer(keyBytes); - KeyFactory keyFactory = KeyFactory.getInstance("RSA"); - return keyFactory.generatePrivate(spec); + return certificates; } /** - * Creates a {@link 
PrivateKey} from the contents of {@code bReader} that contains an DSA private key encoded in - OpenSSL traditional format. + * Creates a {@link PrivateKey} from the contents of a PEM-encoded key file, with or without encryption. + * When enforcing the approved-only mode in Java security settings, some functionalities might be restricted due to the limited + * set of allowed algorithms. One such restriction includes Password Based Key Derivation Functions (PBKDF) like those used by OpenSSL + * and PKCS#12 formats. Since these formats rely on PBKDF algorithms, they cannot operate correctly within the approved-only mode. + * Consequently, attempting to utilize them could result in a {@link java.security.NoSuchAlgorithmException}. * - * @param bReader the {@link BufferedReader} containing the key file contents - * @param passwordSupplier A password supplier for the potentially encrypted (password protected) key + * @param passwordSupplier The password supplier for the encrypted (password protected) key * @return {@link PrivateKey} - * @throws IOException if the file can't be read - * @throws GeneralSecurityException if the private key can't be generated from the {@link DSAPrivateKeySpec} - */ - private static PrivateKey parseOpenSslDsa(BufferedReader bReader, Supplier passwordSupplier) throws IOException, - GeneralSecurityException { - StringBuilder sb = new StringBuilder(); - String line = bReader.readLine(); - Map pemHeaders = new HashMap<>(); - - while (line != null) { - if (OPENSSL_DSA_FOOTER.equals(line.trim())) { - // Unencrypted - break; - } - // Parse PEM headers according to https://www.ietf.org/rfc/rfc1421.txt - if (line.contains(":")) { - String[] header = line.split(":"); - pemHeaders.put(header[0].trim(), header[1].trim()); + * @throws IOException If the file can't be read + */ + private static PrivateKeyInfo loadPrivateKeyFromFile(Path keyPath, Supplier passwordSupplier) throws IOException, + PKCSException { + + try (PEMParser pemParser = new PEMParser(Files.newBufferedReader(keyPath, StandardCharsets.UTF_8))) { + Object object = readObject(keyPath, pemParser); + + if (object instanceof PKCS8EncryptedPrivateKeyInfo) { // encrypted private key in pkcs8-format + var privateKeyInfo = (PKCS8EncryptedPrivateKeyInfo) object; + var inputDecryptorProvider = new JcePKCSPBEInputDecryptorProviderBuilder().setProvider(BC).build(passwordSupplier.get()); + return privateKeyInfo.decryptPrivateKeyInfo(inputDecryptorProvider); + } else if (object instanceof PEMEncryptedKeyPair) { // encrypted private key + var encryptedKeyPair = (PEMEncryptedKeyPair) object; + var decryptorProvider = new JcePEMDecryptorProviderBuilder().setProvider(BC).build(passwordSupplier.get()); + var keyPair = encryptedKeyPair.decryptKeyPair(decryptorProvider); + return keyPair.getPrivateKeyInfo(); + } else if (object instanceof PEMKeyPair) { // unencrypted private key + return ((PEMKeyPair) object).getPrivateKeyInfo(); + } else if (object instanceof PrivateKeyInfo) { // unencrypted private key in pkcs8-format + return (PrivateKeyInfo) object; } else { - sb.append(line.trim()); - } - line = bReader.readLine(); - } - if (null == line || OPENSSL_DSA_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, PEM footer is invalid or missing"); - } - byte[] keyBytes = possiblyDecryptPKCS1Key(pemHeaders, sb.toString(), passwordSupplier); - DSAPrivateKeySpec spec = parseDsaDer(keyBytes); - KeyFactory keyFactory = KeyFactory.getInstance("DSA"); - return keyFactory.generatePrivate(spec); - } - - /** - * Creates a {@link
PrivateKey} from the contents of {@code bReader} that contains an encrypted private key encoded in - * PKCS#8 - * - * @param bReader the {@link BufferedReader} containing the key file contents - * @param keyPassword The password for the encrypted (password protected) key - * @return {@link PrivateKey} - * @throws IOException if the file can't be read - * @throws GeneralSecurityException if the private key can't be generated from the {@link PKCS8EncodedKeySpec} - */ - private static PrivateKey parsePKCS8Encrypted(BufferedReader bReader, char[] keyPassword) throws IOException, GeneralSecurityException { - StringBuilder sb = new StringBuilder(); - String line = bReader.readLine(); - while (line != null) { - if (PKCS8_ENCRYPTED_FOOTER.equals(line.trim())) { - break; - } - sb.append(line.trim()); - line = bReader.readLine(); - } - if (null == line || PKCS8_ENCRYPTED_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, PEM footer is invalid or missing"); - } - byte[] keyBytes = Base64.getDecoder().decode(sb.toString()); - - EncryptedPrivateKeyInfo encryptedPrivateKeyInfo = new EncryptedPrivateKeyInfo(keyBytes); - SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance(encryptedPrivateKeyInfo.getAlgName()); - SecretKey secretKey = secretKeyFactory.generateSecret(new PBEKeySpec(keyPassword)); - Cipher cipher = Cipher.getInstance(encryptedPrivateKeyInfo.getAlgName()); - cipher.init(Cipher.DECRYPT_MODE, secretKey, encryptedPrivateKeyInfo.getAlgParameters()); - PKCS8EncodedKeySpec keySpec = encryptedPrivateKeyInfo.getKeySpec(cipher); - String keyAlgo = getKeyAlgorithmIdentifier(keySpec.getEncoded()); - KeyFactory keyFactory = KeyFactory.getInstance(keyAlgo); - return keyFactory.generatePrivate(keySpec); - } - - /** - * Decrypts the password protected contents using the algorithm and IV that is specified in the PEM Headers of the file - * - * @param pemHeaders The Proc-Type and DEK-Info PEM headers that have been extracted from the key file - * @param keyContents The key as a base64 encoded String - * @param passwordSupplier A password supplier for the encrypted (password protected) key - * @return the decrypted key bytes - * @throws GeneralSecurityException if the key can't be decrypted - * @throws IOException if the PEM headers are missing or malformed - */ - private static byte[] possiblyDecryptPKCS1Key(Map pemHeaders, String keyContents, Supplier passwordSupplier) - throws GeneralSecurityException, IOException { - byte[] keyBytes = Base64.getDecoder().decode(keyContents); - String procType = pemHeaders.get("Proc-Type"); - if ("4,ENCRYPTED".equals(procType)) { - // We only handle PEM encryption - String encryptionParameters = pemHeaders.get("DEK-Info"); - if (null == encryptionParameters) { - // malformed pem - throw new IOException("Malformed PEM File, DEK-Info header is missing"); - } - char[] password = passwordSupplier.get(); - if (password == null) { - throw new IOException("cannot read encrypted key without a password"); + throw new SslConfigException( + String.format( + Locale.ROOT, + "error parsing private key [%s], invalid encrypted private key class: [%s]", + keyPath.toAbsolutePath(), + object.getClass().getName() + ) + ); } - Cipher cipher = getCipherFromParameters(encryptionParameters, password); - byte[] decryptedKeyBytes = cipher.doFinal(keyBytes); - return decryptedKeyBytes; } - return keyBytes; } /** - * Creates a {@link Cipher} from the contents of the DEK-Info header of a PEM file. 
RFC 1421 indicates that supported algorithms are - defined in RFC 1423. RFC 1423 only defines DES-CBS and triple DES (EDE) in CBC mode. AES in CBC mode is also widely used though ( 3 - different variants of 128, 192, 256 bit keys ) + * Supports PEM files that include parameters. * - * @param dekHeaderValue The value of the DEK-Info PEM header - * @param password The password with which the key is encrypted - * @return a cipher of the appropriate algorithm and parameters to be used for decryption - * @throws GeneralSecurityException if the algorithm is not available in the used security provider, or if the key is inappropriate - * for the cipher - * @throws IOException if the DEK-Info PEM header is invalid - */ - private static Cipher getCipherFromParameters(String dekHeaderValue, char[] password) throws GeneralSecurityException, IOException { - final String padding = "PKCS5Padding"; - final SecretKey encryptionKey; - final String[] valueTokens = dekHeaderValue.split(","); - if (valueTokens.length != 2) { - throw new IOException("Malformed PEM file, DEK-Info PEM header is invalid"); - } - final String algorithm = valueTokens[0]; - final String ivString = valueTokens[1]; - final byte[] iv; - try { - iv = hexStringToByteArray(ivString); - } catch (IllegalArgumentException e) { - throw new IOException("Malformed PEM file, DEK-Info IV is invalid", e); - } - if ("DES-CBC".equals(algorithm)) { - byte[] key = generateOpenSslKey(password, iv, 8); - encryptionKey = new SecretKeySpec(key, "DES"); - } else if ("DES-EDE3-CBC".equals(algorithm)) { - byte[] key = generateOpenSslKey(password, iv, 24); - encryptionKey = new SecretKeySpec(key, "DESede"); - } else if ("AES-128-CBC".equals(algorithm)) { - byte[] key = generateOpenSslKey(password, iv, 16); - encryptionKey = new SecretKeySpec(key, "AES"); - } else if ("AES-192-CBC".equals(algorithm)) { - byte[] key = generateOpenSslKey(password, iv, 24); - encryptionKey = new SecretKeySpec(key, "AES"); - } else if ("AES-256-CBC".equals(algorithm)) { - byte[] key = generateOpenSslKey(password, iv, 32); - encryptionKey = new SecretKeySpec(key, "AES"); - } else { - throw new GeneralSecurityException("Private Key encrypted with unsupported algorithm [" + algorithm + "]"); - } - String transformation = encryptionKey.getAlgorithm() + "/" + "CBC" + "/" + padding; - Cipher cipher = Cipher.getInstance(transformation); - cipher.init(Cipher.DECRYPT_MODE, encryptionKey, new IvParameterSpec(iv)); - return cipher; - } - - /** - * Performs key stretching in the same manner that OpenSSL does. This is basically a KDF - * that uses n rounds of salted MD5 (as many times as needed to get the necessary number of key bytes) - *

- * https://www.openssl.org/docs/man1.1.0/crypto/PEM_write_bio_PrivateKey_traditional.html - */ - private static byte[] generateOpenSslKey(char[] password, byte[] salt, int keyLength) { - byte[] passwordBytes = CharArrays.toUtf8Bytes(password); - MessageDigest md5 = SslUtil.messageDigest("md5"); - byte[] key = new byte[keyLength]; - int copied = 0; - int remaining; - while (copied < keyLength) { - remaining = keyLength - copied; - md5.update(passwordBytes, 0, passwordBytes.length); - md5.update(salt, 0, 8);// AES IV (salt) is longer but we only need 8 bytes - byte[] tempDigest = md5.digest(); - int bytesToCopy = (remaining > 16) ? 16 : remaining; // MD5 digests are 16 bytes - System.arraycopy(tempDigest, 0, key, copied, bytesToCopy); - copied += bytesToCopy; - if (remaining == 0) { - break; - } - md5.update(tempDigest, 0, 16); // use previous round digest as IV - } - Arrays.fill(passwordBytes, (byte) 0); - return key; - } - - /** - * Converts a hexadecimal string to a byte array - */ - private static byte[] hexStringToByteArray(String hexString) { - int len = hexString.length(); - if (len % 2 == 0) { - byte[] data = new byte[len / 2]; - for (int i = 0; i < len; i += 2) { - final int k = Character.digit(hexString.charAt(i), 16); - final int l = Character.digit(hexString.charAt(i + 1), 16); - if (k == -1 || l == -1) { - throw new IllegalStateException("String [" + hexString + "] is not hexadecimal"); + * @return high-level Object from the content + */ + private static Object readObject(Path keyPath, PEMParser pemParser) throws IOException { + while (pemParser.ready()) { + try { + var object = pemParser.readObject(); + if (object == null) { // ignore unknown objects; + continue; } - data[i / 2] = (byte) ((k << 4) + l); - } - return data; - } else { - throw new IllegalStateException( - "Hexadecimal string [" + hexString + "] has odd length and cannot be converted to a byte array" - ); - } - } - - /** - * Parses a DER encoded EC key to an {@link ECPrivateKeySpec} using a minimal {@link DerParser} - * - * @param keyBytes the private key raw bytes - * @return {@link ECPrivateKeySpec} - * @throws IOException if the DER encoded key can't be parsed - */ - private static ECPrivateKeySpec parseEcDer(byte[] keyBytes) throws IOException, GeneralSecurityException { - DerParser parser = new DerParser(keyBytes); - DerParser.Asn1Object sequence = parser.readAsn1Object(); - parser = sequence.getParser(); - parser.readAsn1Object().getInteger(); // version - String keyHex = parser.readAsn1Object().getString(); - BigInteger privateKeyInt = new BigInteger(keyHex, 16); - DerParser.Asn1Object choice = parser.readAsn1Object(); - parser = choice.getParser(); - String namedCurve = getEcCurveNameFromOid(parser.readAsn1Object().getOid()); - KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("EC"); - AlgorithmParameterSpec algorithmParameterSpec = new ECGenParameterSpec(namedCurve); - keyPairGenerator.initialize(algorithmParameterSpec); - ECParameterSpec parameterSpec = ((ECKey) keyPairGenerator.generateKeyPair().getPrivate()).getParams(); - return new ECPrivateKeySpec(privateKeyInt, parameterSpec); - } - - /** - * Parses a DER encoded RSA key to a {@link RSAPrivateCrtKeySpec} using a minimal {@link DerParser} - * - * @param keyBytes the private key raw bytes - * @return {@link RSAPrivateCrtKeySpec} - * @throws IOException if the DER encoded key can't be parsed - */ - private static RSAPrivateCrtKeySpec parseRsaDer(byte[] keyBytes) throws IOException { - DerParser parser = new DerParser(keyBytes); - 
DerParser.Asn1Object sequence = parser.readAsn1Object(); - parser = sequence.getParser(); - parser.readAsn1Object().getInteger(); // (version) We don't need it but must read to get to modulus - BigInteger modulus = parser.readAsn1Object().getInteger(); - BigInteger publicExponent = parser.readAsn1Object().getInteger(); - BigInteger privateExponent = parser.readAsn1Object().getInteger(); - BigInteger prime1 = parser.readAsn1Object().getInteger(); - BigInteger prime2 = parser.readAsn1Object().getInteger(); - BigInteger exponent1 = parser.readAsn1Object().getInteger(); - BigInteger exponent2 = parser.readAsn1Object().getInteger(); - BigInteger coefficient = parser.readAsn1Object().getInteger(); - return new RSAPrivateCrtKeySpec(modulus, publicExponent, privateExponent, prime1, prime2, exponent1, exponent2, coefficient); - } - - /** - * Parses a DER encoded DSA key to a {@link DSAPrivateKeySpec} using a minimal {@link DerParser} - * - * @param keyBytes the private key raw bytes - * @return {@link DSAPrivateKeySpec} - * @throws IOException if the DER encoded key can't be parsed - */ - private static DSAPrivateKeySpec parseDsaDer(byte[] keyBytes) throws IOException { - DerParser parser = new DerParser(keyBytes); - DerParser.Asn1Object sequence = parser.readAsn1Object(); - parser = sequence.getParser(); - parser.readAsn1Object().getInteger(); // (version) We don't need it but must read to get to p - BigInteger p = parser.readAsn1Object().getInteger(); - BigInteger q = parser.readAsn1Object().getInteger(); - BigInteger g = parser.readAsn1Object().getInteger(); - parser.readAsn1Object().getInteger(); // we don't need x - BigInteger x = parser.readAsn1Object().getInteger(); - return new DSAPrivateKeySpec(x, p, q, g); - } - - /** - * Parses a DER encoded private key and reads its algorithm identifier Object OID. - * - * @param keyBytes the private key raw bytes - * @return A string identifier for the key algorithm (RSA, DSA, or EC) - * @throws GeneralSecurityException if the algorithm oid that is parsed from ASN.1 is unknown - * @throws IOException if the DER encoded key can't be parsed - */ - private static String getKeyAlgorithmIdentifier(byte[] keyBytes) throws IOException, GeneralSecurityException { - DerParser parser = new DerParser(keyBytes); - DerParser.Asn1Object sequence = parser.readAsn1Object(); - parser = sequence.getParser(); - parser.readAsn1Object().getInteger(); // version - DerParser.Asn1Object algSequence = parser.readAsn1Object(); - parser = algSequence.getParser(); - String oidString = parser.readAsn1Object().getOid(); - switch (oidString) { - case "1.2.840.10040.4.1": - return "DSA"; - case "1.2.840.113549.1.1.1": - return "RSA"; - case "1.2.840.10045.2.1": - return "EC"; - } - throw new GeneralSecurityException( - "Error parsing key algorithm identifier. 
Algorithm with OID [" + oidString + "] is not supported" - ); - } - - static List readCertificates(Collection certPaths) throws CertificateException, IOException { - CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); - List certificates = new ArrayList<>(certPaths.size()); - for (Path path : certPaths) { - try (InputStream input = Files.newInputStream(path)) { - final Collection parsed = certFactory.generateCertificates(input); - if (parsed.isEmpty()) { - throw new SslConfigException("failed to parse any certificates from [" + path.toAbsolutePath() + "]"); + if (object instanceof ASN1ObjectIdentifier) { // ignore -----BEGIN EC PARAMETERS----- + continue; } - certificates.addAll(parsed); + return object; + } catch (IOException e) { // ignore -----BEGIN DSA PARAMETERS----- + // ignore } } - return certificates; - } - - private static String getEcCurveNameFromOid(String oidString) throws GeneralSecurityException { - switch (oidString) { - // see https://tools.ietf.org/html/rfc5480#section-2.1.1.1 - case "1.2.840.10045.3.1": - return "secp192r1"; - case "1.3.132.0.1": - return "sect163k1"; - case "1.3.132.0.15": - return "sect163r2"; - case "1.3.132.0.33": - return "secp224r1"; - case "1.3.132.0.26": - return "sect233k1"; - case "1.3.132.0.27": - return "sect233r1"; - case "1.2.840.10045.3.1.7": - return "secp256r1"; - case "1.3.132.0.16": - return "sect283k1"; - case "1.3.132.0.17": - return "sect283r1"; - case "1.3.132.0.34": - return "secp384r1"; - case "1.3.132.0.36": - return "sect409k1"; - case "1.3.132.0.37": - return "sect409r1"; - case "1.3.132.0.35": - return "secp521r1"; - case "1.3.132.0.38": - return "sect571k1"; - case "1.3.132.0.39": - return "sect571r1"; - } - throw new GeneralSecurityException( - "Error parsing EC named curve identifier. Named curve with OID: " + oidString + " is not supported" + throw new SslConfigException( + "Error parsing Private Key [" + keyPath.toAbsolutePath() + "]. The file is empty or does not contain the expected key format."
); } diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfiguration.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfiguration.java index 23acb0ff269e2..546d7f0ebd994 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfiguration.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfiguration.java @@ -38,7 +38,6 @@ import java.nio.file.Path; import java.security.GeneralSecurityException; -import java.security.NoSuchAlgorithmException; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -66,12 +65,7 @@ public class SslConfiguration { static final Map ORDERED_PROTOCOL_ALGORITHM_MAP; static { LinkedHashMap protocolAlgorithmMap = new LinkedHashMap<>(); - try { - SSLContext.getInstance("TLSv1.3"); - protocolAlgorithmMap.put("TLSv1.3", "TLSv1.3"); - } catch (NoSuchAlgorithmException e) { - // ignore since we support JVMs (and BC JSSE in FIPS mode) that do not support TLSv1.3 - } + protocolAlgorithmMap.put("TLSv1.3", "TLSv1.3"); protocolAlgorithmMap.put("TLSv1.2", "TLSv1.2"); protocolAlgorithmMap.put("TLSv1.1", "TLSv1.1"); protocolAlgorithmMap.put("TLSv1", "TLSv1"); diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java index 0b06a0692197e..433bec734e0b8 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java @@ -360,14 +360,11 @@ private List resolveListSetting(String key, Function parser, L private static List loadDefaultCiphers() { final boolean has256BitAES = has256BitAES(); - final boolean tlsV13Supported = DEFAULT_PROTOCOLS.contains("TLSv1.3"); List ciphers = new ArrayList<>(); - if (tlsV13Supported) { // TLSv1.3 cipher has PFS, AEAD, hardware support - if (has256BitAES) { - ciphers.add("TLS_AES_256_GCM_SHA384"); - } - ciphers.add("TLS_AES_128_GCM_SHA256"); + if (has256BitAES) { + ciphers.add("TLS_AES_256_GCM_SHA384"); } + ciphers.add("TLS_AES_128_GCM_SHA256"); // use GCM: PFS, AEAD, hardware support if (has256BitAES) { ciphers.addAll( diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java index 688f03a1e51fa..70cb76ceaec51 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java @@ -41,11 +41,11 @@ import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.nio.file.StandardCopyOption; -import java.security.GeneralSecurityException; import java.security.PrivateKey; import java.security.cert.CertificateParsingException; import java.security.cert.X509Certificate; import java.util.Arrays; +import java.util.function.Supplier; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -58,6 +58,7 @@ public class PemKeyConfigTests extends OpenSearchTestCase { private static final int IP_NAME = 7; private static final int DNS_NAME = 2; + private static final Supplier STRONG_PRIVATE_SECRET = "6!6428DQXwPpi7@$ggeg/="::toCharArray; public void testBuildKeyConfigFromPkcs1PemFilesWithoutPassword() throws Exception { final Path cert = getDataPath("/certs/cert1/cert1.crt"); @@ -68,8 +69,8 @@ public void 
testBuildKeyConfigFromPkcs1PemFilesWithoutPassword() throws Exceptio } public void testBuildKeyConfigFromPkcs1PemFilesWithPassword() throws Exception { - final Path cert = getDataPath("/certs/cert2/cert2.crt"); - final Path key = getDataPath("/certs/cert2/cert2.key"); + final Path cert = getDataPath("/certs/cert2/cert2-pkcs1.crt"); + final Path key = getDataPath("/certs/cert2/cert2-pkcs1.key"); final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, "c2-pass".toCharArray()); assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert, key)); assertCertificateAndKey(keyConfig, "CN=cert2"); @@ -77,7 +78,7 @@ public void testBuildKeyConfigFromPkcs1PemFilesWithPassword() throws Exception { public void testBuildKeyConfigFromPkcs8PemFilesWithoutPassword() throws Exception { final Path cert = getDataPath("/certs/cert1/cert1.crt"); - final Path key = getDataPath("/certs/cert1/cert1-pkcs8.key"); + final Path key = getDataPath("/certs/cert1/cert1.key"); final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, new char[0]); assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert, key)); assertCertificateAndKey(keyConfig, "CN=cert1"); @@ -86,8 +87,8 @@ public void testBuildKeyConfigFromPkcs8PemFilesWithoutPassword() throws Exceptio public void testBuildKeyConfigFromPkcs8PemFilesWithPassword() throws Exception { assumeFalse("Can't run in a FIPS JVM, PBE KeySpec is not available", inFipsJvm()); final Path cert = getDataPath("/certs/cert2/cert2.crt"); - final Path key = getDataPath("/certs/cert2/cert2-pkcs8.key"); - final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, "c2-pass".toCharArray()); + final Path key = getDataPath("/certs/cert2/cert2.key"); + final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, STRONG_PRIVATE_SECRET.get()); assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert, key)); assertCertificateAndKey(keyConfig, "CN=cert2"); } @@ -166,7 +167,7 @@ private void assertPasswordIsIncorrect(PemKeyConfig keyConfig, Path key) { final SslConfigException exception = expectThrows(SslConfigException.class, keyConfig::createKeyManager); assertThat(exception.getMessage(), containsString("private key file")); assertThat(exception.getMessage(), containsString(key.toAbsolutePath().toString())); - assertThat(exception.getCause(), instanceOf(GeneralSecurityException.class)); + assertThat(exception, instanceOf(SslConfigException.class)); } private void assertFileNotFound(PemKeyConfig keyConfig, String type, Path file) { diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java index e664e379d1e97..4175b0ee424b7 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java @@ -81,7 +81,7 @@ public void testEmptyFileFails() throws Exception { final Path ca = createTempFile("ca", ".crt"); final PemTrustConfig trustConfig = new PemTrustConfig(Collections.singletonList(ca)); assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ca)); - assertEmptyFile(trustConfig, ca); + assertFailedToParse(trustConfig, ca); } public void testMissingFileFailsWithMeaningfulMessage() throws Exception { @@ -135,21 +135,16 @@ private void assertCertificateChain(PemTrustConfig trustConfig, String... 
caName assertThat(issuerNames, Matchers.containsInAnyOrder(caNames)); } - private void assertEmptyFile(PemTrustConfig trustConfig, Path file) { + private void assertFailedToParse(PemTrustConfig trustConfig, Path file) { final SslConfigException exception = expectThrows(SslConfigException.class, trustConfig::createTrustManager); logger.info("failure", exception); assertThat(exception.getMessage(), Matchers.containsString(file.toAbsolutePath().toString())); - assertThat(exception.getMessage(), Matchers.containsString("failed to parse any certificates")); + assertThat(exception.getMessage(), Matchers.containsString("Failed to parse any certificate from")); } private void assertInvalidFileFormat(PemTrustConfig trustConfig, Path file) { final SslConfigException exception = expectThrows(SslConfigException.class, trustConfig::createTrustManager); assertThat(exception.getMessage(), Matchers.containsString(file.toAbsolutePath().toString())); - // When running on BC-FIPS, an invalid file format *might* just fail to parse, without any errors (just like an empty file) - // or it might behave per the SUN provider, and throw a GSE (depending on exactly what was invalid) - if (inFipsJvm() && exception.getMessage().contains("failed to parse any certificates")) { - return; - } assertThat(exception.getMessage(), Matchers.containsString("cannot create trust")); assertThat(exception.getMessage(), Matchers.containsString("PEM")); assertThat(exception.getCause(), Matchers.instanceOf(GeneralSecurityException.class)); diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemUtilsTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemUtilsTests.java index c7ca19bb679d3..f1255ab64f672 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemUtilsTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemUtilsTests.java @@ -32,8 +32,11 @@ package org.opensearch.common.ssl; +import org.bouncycastle.asn1.ASN1ObjectIdentifier; +import org.bouncycastle.asn1.pkcs.PrivateKeyInfo; import org.opensearch.test.OpenSearchTestCase; +import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; @@ -44,6 +47,7 @@ import java.security.interfaces.ECPrivateKey; import java.security.spec.ECGenParameterSpec; import java.security.spec.ECParameterSpec; +import java.util.Locale; import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; @@ -55,6 +59,7 @@ public class PemUtilsTests extends OpenSearchTestCase { private static final Supplier EMPTY_PASSWORD = () -> new char[0]; private static final Supplier TESTNODE_PASSWORD = "testnode"::toCharArray; + private static final Supplier STRONG_PRIVATE_SECRET = "6!6428DQXwPpi7@$ggeg/="::toCharArray; public void testReadPKCS8RsaKey() throws Exception { Key key = getKeyFromKeystore("RSA"); @@ -82,6 +87,16 @@ public void testReadPKCS8DsaKey() throws Exception { assertThat(privateKey, equalTo(key)); } + public void testReadEncryptedPKCS8DsaKey() throws Exception { + Key key = getKeyFromKeystore("DSA"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/dsa_key_pkcs8_encrypted.pem"), TESTNODE_PASSWORD); + + assertThat(privateKey, notNullValue()); + assertThat(privateKey, equalTo(key)); + } + public void testReadEcKeyCurves() throws Exception { String curve = randomFrom("secp256r1", "secp384r1", "secp521r1"); PrivateKey privateKey = 
PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/private_" + curve + ".pem"), ""::toCharArray); @@ -102,6 +117,16 @@ public void testReadPKCS8EcKey() throws Exception { assertThat(privateKey, equalTo(key)); } + public void testReadEncryptedPKCS8EcKey() throws Exception { + var key = getKeyFromKeystore("EC"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + var privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_pkcs8_encrypted.pem"), TESTNODE_PASSWORD); + + assertThat(privateKey, notNullValue()); + assertThat(privateKey, equalTo(key)); + } + public void testReadEncryptedPKCS8Key() throws Exception { assumeFalse("Can't run in a FIPS JVM, PBE KeySpec is not available", inFipsJvm()); Key key = getKeyFromKeystore("RSA"); @@ -176,13 +201,12 @@ public void testReadEncryptedOpenSslDsaKey() throws Exception { } public void testReadOpenSslEcKey() throws Exception { - Key key = getKeyFromKeystore("EC"); + var key = getKeyFromKeystore("EC"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_plain.pem"), EMPTY_PASSWORD); + var privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_plain.pem"), EMPTY_PASSWORD); - assertThat(privateKey, notNullValue()); - assertThat(privateKey, equalTo(key)); + assertTrue(isCryptographicallyEqual((ECPrivateKey) key, (ECPrivateKey) privateKey)); } public void testReadOpenSslEcKeyWithParams() throws Exception { @@ -194,16 +218,41 @@ public void testReadOpenSslEcKeyWithParams() throws Exception { EMPTY_PASSWORD ); + assertTrue(isCryptographicallyEqual((ECPrivateKey) key, (ECPrivateKey) privateKey)); + } + + public void testReadEncryptedOpenSslEcKey() throws Exception { + var key = getKeyFromKeystore("EC"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + var privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_encrypted.pem"), TESTNODE_PASSWORD); + + assertTrue(isCryptographicallyEqual((ECPrivateKey) key, (ECPrivateKey) privateKey)); + } + + public void testReadEncryptedPKCS8KeyWithPBKDF2() throws Exception { + Key key = getKeyFromKeystore("PKCS8_PBKDF2"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/key_PKCS8_enc_pbkdf2.pem"), STRONG_PRIVATE_SECRET); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); } - public void testReadEncryptedOpenSslEcKey() throws Exception { - Key key = getKeyFromKeystore("EC"); + public void testReadEncryptedDsaKeyWithPBKDF2() throws Exception { + Key key = getKeyFromKeystore("DSA_PBKDF2"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_encrypted.pem"), TESTNODE_PASSWORD); + PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/key_DSA_enc_pbkdf2.pem"), STRONG_PRIVATE_SECRET); + assertThat(privateKey, notNullValue()); + assertThat(privateKey, equalTo(key)); + } + public void testReadEncryptedEcKeyWithPBKDF2() throws Exception { + Key key = getKeyFromKeystore("EC_PBKDF2"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/key_EC_enc_pbkdf2.pem"), EMPTY_PASSWORD); 
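+        // key_EC_enc_pbkdf2.pem is a PBKDF2-encrypted PKCS#8 key: PemUtils decrypts it through BouncyCastle's JcePKCSPBEInputDecryptorProviderBuilder and then converts the resulting PrivateKeyInfo back to a JCA PrivateKey via JcaPEMKeyConverter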
assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); } @@ -211,24 +260,24 @@ public void testReadEncryptedOpenSslEcKey() throws Exception { public void testReadUnsupportedKey() { final Path path = getDataPath("/certs/pem-utils/key_unsupported.pem"); SslConfigException e = expectThrows(SslConfigException.class, () -> PemUtils.readPrivateKey(path, TESTNODE_PASSWORD)); - assertThat(e.getMessage(), containsString("file does not contain a supported key format")); + assertThat(e.getMessage(), containsString("Error parsing Private Key")); assertThat(e.getMessage(), containsString(path.toAbsolutePath().toString())); + assertThat(e.getMessage(), containsString("file is empty")); } public void testReadPemCertificateAsKey() { final Path path = getDataPath("/certs/pem-utils/testnode.crt"); SslConfigException e = expectThrows(SslConfigException.class, () -> PemUtils.readPrivateKey(path, TESTNODE_PASSWORD)); - assertThat(e.getMessage(), containsString("file does not contain a supported key format")); + assertThat(e.getMessage(), containsString("invalid encrypted private key class")); assertThat(e.getMessage(), containsString(path.toAbsolutePath().toString())); } public void testReadCorruptedKey() { final Path path = getDataPath("/certs/pem-utils/corrupted_key_pkcs8_plain.pem"); SslConfigException e = expectThrows(SslConfigException.class, () -> PemUtils.readPrivateKey(path, TESTNODE_PASSWORD)); - assertThat(e.getMessage(), containsString("private key")); - assertThat(e.getMessage(), containsString("cannot be parsed")); + assertThat(e.getMessage(), containsString("Error parsing Private Key")); assertThat(e.getMessage(), containsString(path.toAbsolutePath().toString())); - assertThat(e.getCause().getMessage(), containsString("PEM footer is invalid or missing")); + assertThat(e.getMessage(), containsString("file is empty")); } public void testReadEmptyFile() { @@ -239,11 +288,27 @@ public void testReadEmptyFile() { } private Key getKeyFromKeystore(String algo) throws Exception { - Path keystorePath = getDataPath("/certs/pem-utils/testnode.jks"); + var keystorePath = getDataPath("/certs/pem-utils/testnode.jks"); + var alias = "testnode_" + algo.toLowerCase(Locale.ROOT); + var password = "testnode".toCharArray(); try (InputStream in = Files.newInputStream(keystorePath)) { KeyStore keyStore = KeyStore.getInstance("jks"); - keyStore.load(in, "testnode".toCharArray()); - return keyStore.getKey("testnode_" + algo, "testnode".toCharArray()); + keyStore.load(in, password); + return keyStore.getKey(alias, password); } } + + private boolean isCryptographicallyEqual(ECPrivateKey key1, ECPrivateKey key2) throws IOException { + var pki1 = PrivateKeyInfo.getInstance(key1.getEncoded()); + var pki2 = PrivateKeyInfo.getInstance(key2.getEncoded()); + + var privateKey1 = org.bouncycastle.asn1.sec.ECPrivateKey.getInstance(pki1.parsePrivateKey()).getKey(); + var privateKey2 = org.bouncycastle.asn1.sec.ECPrivateKey.getInstance(pki2.parsePrivateKey()).getKey(); + + var oid1 = ASN1ObjectIdentifier.getInstance(pki1.getPrivateKeyAlgorithm().getParameters()); + var oid2 = ASN1ObjectIdentifier.getInstance(pki2.getPrivateKeyAlgorithm().getParameters()); + + return privateKey1.equals(privateKey2) && oid1.equals(oid2); + } + } diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationLoaderTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationLoaderTests.java index 5af7ddc73e680..366e936ca4852 100644 --- 
a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationLoaderTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationLoaderTests.java @@ -53,6 +53,7 @@ public class SslConfigurationLoaderTests extends OpenSearchTestCase { + private final String STRONG_PRIVATE_SECRET = "6!6428DQXwPpi7@$ggeg/="; private final Path certRoot = getDataPath("/certs/ca1/ca.crt").getParent().getParent(); private Settings settings; @@ -166,9 +167,9 @@ public void testLoadKeysFromPemFiles() { .put("test.ssl.key", certName + "/" + certName + ".key"); if (usePassword) { if (useLegacyPassword) { - builder.put("test.ssl.key_passphrase", "c2-pass"); + builder.put("test.ssl.key_passphrase", STRONG_PRIVATE_SECRET); } else { - secureSettings.setString("test.ssl.secure_key_passphrase", "c2-pass"); + secureSettings.setString("test.ssl.secure_key_passphrase", STRONG_PRIVATE_SECRET); } } settings = builder.build(); diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java index c966b4259219f..e19fa91f7773e 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java @@ -70,6 +70,12 @@ public class SslDiagnosticsTests extends OpenSearchTestCase { private static final byte[] MOCK_ENCODING_4 = { 0x64, 0x65, 0x66, 0x67, 0x68, 0x69 }; private static final String MOCK_FINGERPRINT_4 = "5d96965bfae50bf2be0d6259eb87a6cc9f5d0b26"; + public void testTrustEmptyStore() { + var fileName = "cert-all/empty.jks"; + var exception = assertThrows(CertificateException.class, () -> loadCertificate(fileName)); + assertThat(exception.getMessage(), Matchers.equalTo("No certificate data found")); + } + public void testDiagnosticMessageWhenServerProvidesAFullCertChainThatIsTrusted() throws Exception { X509Certificate[] chain = loadCertChain("cert1/cert1.crt", "ca1/ca.crt"); final SSLSession session = session("192.168.1.1"); @@ -85,7 +91,7 @@ public void testDiagnosticMessageWhenServerProvidesAFullCertChainThatIsTrusted() message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.1];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by" @@ -110,7 +116,7 @@ public void testDiagnosticMessageWhenServerProvidesAFullCertChainThatIsntTrusted message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.1];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by (subject [CN=Test CA 1] fingerprint [2b7b0416391bdf86502505c23149022d2213dadc])" @@ -134,7 +140,7 @@ public void testDiagnosticMessageWhenServerFullCertChainIsntTrustedButMimicIssue message, Matchers.equalTo( "failed to establish trust with server at 
[192.168.1.1];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by (subject [CN=Test CA 1] fingerprint [2b7b0416391bdf86502505c23149022d2213dadc])" @@ -160,7 +166,7 @@ public void testDiagnosticMessageWhenServerProvidesEndCertificateOnlyAndTheCertA message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.1];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain;" @@ -185,7 +191,7 @@ public void testDiagnosticMessageWhenServerProvidesEndCertificateOnlyButTheCertA message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.1];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain;" @@ -209,7 +215,7 @@ public void testDiagnosticMessageWhenServerProvidesEndCertificateOnlyWithMimicIs message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.1];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain;" @@ -235,7 +241,7 @@ public void testDiagnosticMessageWhenServerProvidesEndCertificateWithMultipleMim message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.9];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain;" @@ -538,7 +544,7 @@ public void testDiagnosticMessageForClientCertificate() throws Exception { Matchers.equalTo( "failed to establish trust with client at [192.168.1.7];" + " the client provided a certificate with subject name [CN=cert1]" - + " and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " and fingerprint 
[7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate is issued by [CN=Test CA 1]" + " but the client did not provide a copy of the issuing certificate in the certificate chain;" + " the issuing certificate with fingerprint [2b7b0416391bdf86502505c23149022d2213dadc]" @@ -571,7 +577,7 @@ public void testDiagnosticMessageWhenCaHasNewIssuingCertificate() throws Excepti message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.4];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by (subject [CN=Test CA 1]" diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java index 7806671d02793..1745c547d04ee 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java @@ -48,6 +48,7 @@ import java.security.cert.X509Certificate; import java.util.Arrays; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -95,7 +96,7 @@ public void testLoadMultipleKeyJksWithSeparateKeyPassword() throws Exception { assertKeysLoaded(keyConfig, "cert1", "cert2"); } - public void testKeyManagerFailsWithIncorrectStorePassword() throws Exception { + public void testKeyManagerFailsWithIncorrectJksStorePassword() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); final Path jks = getDataPath("/certs/cert-all/certs.jks"); final StoreKeyConfig keyConfig = new StoreKeyConfig( @@ -109,7 +110,7 @@ public void testKeyManagerFailsWithIncorrectStorePassword() throws Exception { assertPasswordIsIncorrect(keyConfig, jks); } - public void testKeyManagerFailsWithIncorrectKeyPassword() throws Exception { + public void testKeyManagerFailsWithIncorrectJksKeyPassword() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); final Path jks = getDataPath("/certs/cert-all/certs.jks"); final StoreKeyConfig keyConfig = new StoreKeyConfig(jks, JKS_PASS, "jks", JKS_PASS, KeyManagerFactory.getDefaultAlgorithm()); @@ -125,21 +126,20 @@ public void testKeyManagerFailsWithMissingKeystoreFile() throws Exception { assertFileNotFound(keyConfig, path); } - public void testMissingKeyEntriesFailsWithMeaningfulMessage() throws Exception { + public void testMissingKeyEntriesFailsForJksWithMeaningfulMessage() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); - final Path ks; - final char[] password; - final String type; - if (randomBoolean()) { - type = "PKCS12"; - ks = getDataPath("/certs/ca-all/ca.p12"); - password = P12_PASS; - } else { - type = "jks"; - ks = getDataPath("/certs/ca-all/ca.jks"); - password = JKS_PASS; - } - final StoreKeyConfig keyConfig = new StoreKeyConfig(ks, password, type, password, KeyManagerFactory.getDefaultAlgorithm()); + final Path ks = getDataPath("/certs/ca-all/ca.jks"); + final char[] password = JKS_PASS; + final StoreKeyConfig 
keyConfig = new StoreKeyConfig(ks, password, "jks", password, KeyManagerFactory.getDefaultAlgorithm()); + assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); + assertNoPrivateKeyEntries(keyConfig, ks); + } + + public void testMissingKeyEntriesFailsForP12WithMeaningfulMessage() throws Exception { + assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); + final Path ks = getDataPath("/certs/ca-all/ca.p12"); + final char[] password = P12_PASS; + final StoreKeyConfig keyConfig = new StoreKeyConfig(ks, password, "PKCS12", password, KeyManagerFactory.getDefaultAlgorithm()); assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); assertNoPrivateKeyEntries(keyConfig, ks); } @@ -211,7 +211,10 @@ private void assertPasswordIsIncorrect(StoreKeyConfig keyConfig, Path key) { assertThat(exception.getMessage(), containsString("password")); } else { assertThat(exception.getCause(), instanceOf(IOException.class)); - assertThat(exception.getCause().getMessage(), containsString("password")); + assertThat( + exception.getCause().getMessage(), + anyOf(containsString("Keystore was tampered with, or password was incorrect"), containsString("BCFKS KeyStore corrupted")) + ); } } diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java index 5609f0fa2c877..8058ffe95dc93 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java @@ -58,7 +58,7 @@ public class StoreTrustConfigTests extends OpenSearchTestCase { private static final char[] JKS_PASS = "jks-pass".toCharArray(); private static final String DEFAULT_ALGORITHM = TrustManagerFactory.getDefaultAlgorithm(); - public void testBuildTrustConfigFromPKCS12() throws Exception { + public void testBuildTrustConfigFromP12() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); final Path ks = getDataPath("/certs/ca1/ca.p12"); final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, P12_PASS, "PKCS12", DEFAULT_ALGORITHM); @@ -66,7 +66,7 @@ public void testBuildTrustConfigFromPKCS12() throws Exception { assertCertificateChain(trustConfig, "CN=Test CA 1"); } - public void testBuildTrustConfigFromJKS() throws Exception { + public void testBuildTrustConfigFromJks() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); final Path ks = getDataPath("/certs/ca-all/ca.jks"); final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, JKS_PASS, "jks", DEFAULT_ALGORITHM); @@ -91,28 +91,25 @@ public void testMissingKeyStoreFailsWithMeaningfulMessage() throws Exception { assertFileNotFound(trustConfig, ks); } - public void testIncorrectPasswordFailsWithMeaningfulMessage() throws Exception { + public void testIncorrectPasswordFailsForP12WithMeaningfulMessage() throws Exception { final Path ks = getDataPath("/certs/ca1/ca.p12"); final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, new char[0], "PKCS12", DEFAULT_ALGORITHM); assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); assertPasswordIsIncorrect(trustConfig, ks); } - public void testMissingTrustEntriesFailsWithMeaningfulMessage() throws Exception { + public void testMissingTrustEntriesFailsForJksKeystoreWithMeaningfulMessage() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", 
inFipsJvm());
- final Path ks;
- final char[] password;
- final String type;
- if (randomBoolean()) {
- type = "PKCS12";
- ks = getDataPath("/certs/cert-all/certs.p12");
- password = P12_PASS;
- } else {
- type = "jks";
- ks = getDataPath("/certs/cert-all/certs.jks");
- password = JKS_PASS;
- }
- final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, password, type, DEFAULT_ALGORITHM);
+ final Path ks = getDataPath("/certs/cert-all/certs.jks");
+ final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, JKS_PASS, "jks", DEFAULT_ALGORITHM);
+ assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks));
+ assertNoCertificateEntries(trustConfig, ks);
+ }
+
+ public void testMissingTrustEntriesFailsForP12KeystoreWithMeaningfulMessage() throws Exception {
+ assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm());
+ final Path ks = getDataPath("/certs/cert-all/certs.p12");
+ final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, P12_PASS, "PKCS12", DEFAULT_ALGORITHM);
 assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks));
 assertNoCertificateEntries(trustConfig, ks);
 }
diff --git a/libs/ssl-config/src/test/resources/certs/README.md b/libs/ssl-config/src/test/resources/certs/README.md
new file mode 100644
index 0000000000000..79790a4918f3e
--- /dev/null
+++ b/libs/ssl-config/src/test/resources/certs/README.md
@@ -0,0 +1,155 @@
+# Create first CA PEM ("ca1")
+
+```bash
+opensearch-certutil ca --pem --out ca1.zip --days 9999 --ca-dn "CN=Test CA 1"
+unzip ca1.zip
+mv ca ca1
+```
+
+# Create second CA PEM ("ca2")
+
+```bash
+opensearch-certutil ca --pem --out ca2.zip --days 9999 --ca-dn "CN=Test CA 2"
+unzip ca2.zip
+mv ca ca2
+```
+
+# Create third CA PEM ("ca3")
+
+```bash
+opensearch-certutil ca --pem --out ca3.zip --days 9999 --ca-dn "CN=Test CA 3"
+unzip ca3.zip
+mv ca ca3
+```
+
+# Create "cert1-pkcs1" PEM
+
+```bash
+opensearch-certutil cert --pem --out cert1-pkcs1.zip --name cert1 --ip 127.0.0.1 --dns localhost --days 9999 --ca-key ca1/ca.key --ca-cert ca1/ca.crt
+unzip cert1-pkcs1.zip
+```
+
+# Create "cert2-pkcs1" PEM (same as cert1, but with a password)
+
+```bash
+opensearch-certutil cert --pem --out cert2-pkcs1.zip --name cert2 --ip 127.0.0.1 --dns localhost --days 9999 --ca-key ca1/ca.key --ca-cert ca1/ca.crt --pass "c2-pass"
+unzip cert2-pkcs1.zip
+```
+
+# Create "cert1" PEM
+
+```bash
+openssl genpkey -algorithm RSA -out cert1/cert1.key
+openssl req -new \
+  -key cert1/cert1.key \
+  -subj "/CN=cert1" \
+  -out cert1/cert1.csr
+openssl x509 -req \
+  -in cert1/cert1.csr \
+  -CA ca1/ca.crt \
+  -CAkey ca1/ca.key \
+  -CAcreateserial \
+  -out cert1/cert1.crt \
+  -days 3650 \
+  -sha256 \
+  -extfile <(printf "subjectAltName=DNS:localhost,IP:127.0.0.1")
+rm cert1/cert1.csr
+```
+
+# Create "cert2" PEM (same as cert1, but with a password)
+
+```bash
+openssl genpkey -algorithm RSA -out cert2/cert2.key -aes256 -pass pass:"$KEY_PW"
+openssl req -new \
+-key cert2/cert2.key \
+-subj "/CN=cert2" \
+-out cert2/cert2.csr \
+-passin pass:"$KEY_PW"
+openssl x509 -req \
+-in cert2/cert2.csr \
+-CA ca1/ca.crt \
+-CAkey ca1/ca.key \
+-CAcreateserial \
+-out cert2/cert2.crt \
+-days 3650 \
+-sha256 \
+-extfile <(printf "subjectAltName=DNS:localhost,IP:127.0.0.1") \
+-passin pass:"$KEY_PW"
+rm cert2/cert2.csr
+```
+
+# Convert CAs to PKCS#12
+
+```bash
+for n in 1 2 3
+do
+  keytool -importcert -file ca${n}/ca.crt -alias ca -keystore ca${n}/ca.p12 -storetype PKCS12 -storepass p12-pass -v
+  keytool -importcert -file ca${n}/ca.crt -alias ca${n} 
-keystore ca-all/ca.p12 -storetype PKCS12 -storepass p12-pass -v +done +``` + +# Convert CAs to JKS + +```bash +for n in 1 2 3 +do + keytool -importcert -file ca${n}/ca.crt -alias ca${n} -keystore ca-all/ca.jks -storetype jks -storepass jks-pass -v +done +``` + +# Convert Certs to PKCS#12 + +```bash +for Cert in cert1 cert2 +do + openssl pkcs12 -export -out $Cert/$Cert.p12 -inkey $Cert/$Cert.key -in $Cert/$Cert.crt -name $Cert -passout pass:p12-pass +done +``` + +# Import Certs into single PKCS#12 keystore + +```bash +for Cert in cert1 cert2 +do + keytool -importkeystore -noprompt \ + -srckeystore $Cert/$Cert.p12 -srcstoretype PKCS12 -srcstorepass p12-pass \ + -destkeystore cert-all/certs.p12 -deststoretype PKCS12 -deststorepass p12-pass +done +``` + +# Import Certs into single JKS keystore with separate key-password + +```bash +for Cert in cert1 cert2 +do + keytool -importkeystore -noprompt \ + -srckeystore $Cert/$Cert.p12 -srcstoretype PKCS12 -srcstorepass p12-pass \ + -destkeystore cert-all/certs.jks -deststoretype jks -deststorepass jks-pass + keytool -keypasswd -keystore cert-all/certs.jks -alias $Cert -keypass p12-pass -new key-pass -storepass jks-pass +done +``` + +# Create a mimic of the first CA ("ca1b") for testing certificates with the same name but different keys + +```bash +opensearch-certutil ca --pem --out ${PWD}/ca1-b.zip --days 9999 --ca-dn "CN=Test CA 1" +unzip ca1-b.zip +mv ca ca1-b +``` + +# Create empty KeyStore + +```bash +keytool -genkeypair \ + -alias temp \ + -storetype JKS \ + -keyalg rsa \ + -storepass storePassword \ + -keypass secretPassword \ + -keystore cert-all/empty.jks \ + -dname "CN=foo,DC=example,DC=com" +keytool -delete \ + -alias temp \ + -storepass storePassword \ + -keystore cert-all/empty.jks +``` diff --git a/libs/ssl-config/src/test/resources/certs/README.txt b/libs/ssl-config/src/test/resources/certs/README.txt deleted file mode 100644 index 09910e99a132e..0000000000000 --- a/libs/ssl-config/src/test/resources/certs/README.txt +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash -# -# This is README describes how the certificates in this directory were created. -# This file can also be executed as a script -# - -# 1. Create first CA PEM ("ca1") - -opensearch-certutil ca --pem --out ca1.zip --days 9999 --ca-dn "CN=Test CA 1" -unzip ca1.zip -mv ca ca1 - -# 2. Create first CA PEM ("ca2") - -opensearch-certutil ca --pem --out ca2.zip --days 9999 --ca-dn "CN=Test CA 2" -unzip ca2.zip -mv ca ca2 - -# 3. Create first CA PEM ("ca3") - -opensearch-certutil ca --pem --out ca3.zip --days 9999 --ca-dn "CN=Test CA 3" -unzip ca3.zip -mv ca ca3 - -# 4. Create "cert1" PEM - -opensearch-certutil cert --pem --out cert1.zip --name cert1 --ip 127.0.0.1 --dns localhost --days 9999 --ca-key ca1/ca.key --ca-cert ca1/ca.crt -unzip cert1.zip - -# 5. Create "cert2" PEM (same as cert1, but with a password) - -opensearch-certutil cert --pem --out cert2.zip --name cert2 --ip 127.0.0.1 --dns localhost --days 9999 --ca-key ca1/ca.key --ca-cert ca1/ca.crt --pass "c2-pass" -unzip cert2.zip - -# 6. Convert CAs to PKCS#12 - -for n in 1 2 3 -do - keytool -importcert -file ca${n}/ca.crt -alias ca -keystore ca${n}/ca.p12 -storetype PKCS12 -storepass p12-pass -v - keytool -importcert -file ca${n}/ca.crt -alias ca${n} -keystore ca-all/ca.p12 -storetype PKCS12 -storepass p12-pass -v -done - -# 7. Convert CAs to JKS - -for n in 1 2 3 -do - keytool -importcert -file ca${n}/ca.crt -alias ca${n} -keystore ca-all/ca.jks -storetype jks -storepass jks-pass -v -done - -# 8. 
Convert Certs to PKCS#12 - -for Cert in cert1 cert2 -do - openssl pkcs12 -export -out $Cert/$Cert.p12 -inkey $Cert/$Cert.key -in $Cert/$Cert.crt -name $Cert -passout pass:p12-pass -done - -# 9. Import Certs into single PKCS#12 keystore - -for Cert in cert1 cert2 -do - keytool -importkeystore -noprompt \ - -srckeystore $Cert/$Cert.p12 -srcstoretype PKCS12 -srcstorepass p12-pass \ - -destkeystore cert-all/certs.p12 -deststoretype PKCS12 -deststorepass p12-pass -done - -# 10. Import Certs into single JKS keystore with separate key-password - -for Cert in cert1 cert2 -do - keytool -importkeystore -noprompt \ - -srckeystore $Cert/$Cert.p12 -srcstoretype PKCS12 -srcstorepass p12-pass \ - -destkeystore cert-all/certs.jks -deststoretype jks -deststorepass jks-pass - keytool -keypasswd -keystore cert-all/certs.jks -alias $Cert -keypass p12-pass -new key-pass -storepass jks-pass -done - -# 11. Create a mimic of the first CA ("ca1b") for testing certificates with the same name but different keys - -opensearch-certutil ca --pem --out ${PWD}/ca1-b.zip --days 9999 --ca-dn "CN=Test CA 1" -unzip ca1-b.zip -mv ca ca1-b - -# 12. Convert certifcate keys to pkcs8 - -openssl pkcs8 -topk8 -inform PEM -in cert1/cert1.key -outform PEM -out cert1/cert1-pkcs8.key -nocrypt -openssl pkcs8 -topk8 -inform PEM -in cert2/cert2.key -outform PEM -out cert2/cert2-pkcs8.key -passin pass:"c2-pass" -passout pass:"c2-pass" diff --git a/libs/ssl-config/src/test/resources/certs/cert-all/certs.p12 b/libs/ssl-config/src/test/resources/certs/cert-all/certs.p12 index b971a1e39c83baeea8e4fab3cf6b76804047ee48..73912976ca7cc61d310d02f1f8805d35ea75f612 100644 GIT binary patch literal 4895 zcmbW3WmFW5mxp2Kl5WYNq#R%- z&+fkevtM@i?5BI~^E>x`y5~6;1S6*hpkshwWV6_K+;A264G}sHdI5}V5EDk$_ZN48 zU<9)NEg}%agc0!n#oQnmRth%3e@+qNqXP49>IJx9~XGZhuS|<`ngy?V1`SYn`F_>sZLIEB z62SqlOjq!GYG#jdSaXZD0FM?eQz>DgIf4xH54b{pNX2u7dudW(Y=WG9^hz3%QM9#GC}f|kavMIhHq5+cWb19HowMddyAl$;BUtf&f>N@lKCZn>2HLwQ$V6npQ$0}(*;JUg~=bQA4PvNdLD zfm|5qwRdOntvv~sqYdK9_Gd8OA^_~`SicplMfjOl&2q!Fdg{&msm@mX`V^Y3Kof-wJa;f^FM9wuYJ1Ms1o?K0=l0 z#unEY9glTqU5oYS&exA(5`@jLc_wLK9`o z)`R*oU+`;or1rR(q;1KUUYIDJ3oeoJ{J2ugg4a5tk%hK0lh8h;C1bSMH}zu5Pfe<` zk%hhJYrS&`tfR`__I)RH*TiRH$aDH(hwDNSJPt%WpKkO{FNa1Eh9kx8*Ki;?M0SjH z*dkPZ$|1wF%Fdb?mvZgfAD2Wf2RLM6jT}I&Dx1c*x}>xfr^81a{Ba0KFRa?$?UMa4 z;*V)c_PgHQJW?`6Rm~`e19uZ{Klba8=Mye5*H~uLV3D7}SBRBMRkcD(G2$&-(!z~I zieKxF$ReXsB-ifMpV#<&L%LRZq9eO9QiZ$Y5K&qxqrC@HrXA-Lm?++fRJa(eXVdxz}m^# z^D6Bs`U7S0BK%&a*`kU&bHX6s>E0Hfj8Pr0E;WDHyKm(CI^|e9YYaZbybdktS=Pl; zg-f{h;tWbQ{Lh+6%Ns=AY%T>Y=tI&|TA3X22Uq?{Gg1wXgp5KZoXbpG=gE69kWMvB zx2f>^c~gmySb1bGN~}Z>hl3A}9V=Yh>efbWG-wnFbny(#he|GA)3je~uZ07fx;KfY zJR!{^W?PQiwugl#Ku;U*VfZnZ_8dxvVZ2~A`he+HNyrj|Azg}6Ft+cdv~IClsIKAuCSps>S5K8F zKTcAXOEc=|GRhJr^9WNYUE{C#r7xUO(LS8Yugz1$j9{9PG-*-uKH;Ce0PWDcYc^Z9 zOre{9?7t=|_T|7?=rF>u`*s;h39Hp(yNlsdwkIv;&&7^h9!`@h{8G=$$n;}58KG7=3Lf7xYA*5kqtqo3Gc z$xm`^;yh;MNzb@wJ5~R(*3wZ%8i2>^8uA5Q*WEH6Z_@l0+(__@?lW)nyYFWn%scA3^n3 zt4)R#Dunn)f=?oK>djcil004nf_H8|={G=&VfhFlzYQzHGmULZ`W!}{*^7Ap!6$lj zNYA16M^OqTOJpH~8}8w$qrD?VP!jb%<!ucxtxTrfs zLF{pJxk9-T$d_4KaJXt5+7hIG|V87dj#m>I0`Jc}}RSkJ4wD0W{Z33rNhp7s&o{Q}?qaggwz>0Y=UqE7yi#LI4UX?J`_x@<8+fOkhUSt<&|M>hfB^`>#efJ^vR^x>J zWyJ34nX@{ltyzI|`a>+FJ@^Y*>b>2xY5DZqc5G*Lt@fACM;xC_#>Ob0J}e;TwpnTm=&#A_Umbx~v91yc)X}9z=W&A7WQyzN1KJvY z^S{?9+f=^EGwM)zJ41RwOz~iR)pbBH2J)HcGi0iw0EgY}zuk4Sadf8t{Pyw7)LuZ} z4vwa!<4}jhFV&ZUaq75yf=fgm653_`QXRz(a=8h)=D%ksZh&*mYanN3FiY7is30G9H>&%^kiOmzV|E( z^g~(9X{Ctw%;|zV#=u&Oc ze~g%7<@QmBdN{L|KctD@QAj~tI{!~vHJH1{oNWo^#4H;?gl)|+QI`o5%I!?^C?K&S 
zhMzhExu_Qob*+&|+xbjNK9RH4bvYwuhr}_c2G-l24=g%e8*{AGr9&Iz0Z^leJ+N^P1)e}f8HEn6-^q8rA!vmkg716noa zdWO__j-hh}gSO+ux8<%oO!+pg%G4U#X?vUxChqg~$`I|(ZntZCx`m-^M}eBM>A7e@ z;sKbSlKf8?4GMJ~x(BEC5Odw5Y6|pT=!Mg~?iZVblNDbDg8#UD%<_ zTAm8d^qA-Hou*95H6M8KapP4)Px4hO5&m4)-ZW78C1x~Dg>p?*NafMaBX z1!RlT7v0tF^RDDFo5pPcea3L!vj zs@y+HJ+V4gQ-DF~8cC*?7xqya$uBx_D>^5|gFiFg3V*(UZrUN1-D1dQJV=K=&-#q# zrb8@$hDF3sP?vWS$=ABHniu)bkgFa#xt{IfMrzj);{S?5OJ3)6{GK_+LG7!hwTP{) zei-Vfqqo{9w_>!fv7V%qjGa9G9nQ4uP-z++)6&E=Kp#`GT8r zd_b*cMOVUxHo7(Ise{B&Bk`l3gz(nySf4cLZ2YgjgZ=9YPWi}%+x~IQaAFL_a?%vy z&O~B`tgVS&>IACK>%satLjaKVz)Kfv%1tKR@hx^<1ikZ!VzFFN@@l9|@W*=FvR>&^ ze@JAZ#_tbSH$KHO8-c63J}4PW^rYsFhaL(mJ}bCX^%;Hws0)jFId%}W=#59tOJxyd z)l_RQ!}{29aAuM8NnZV!>GjX=9wh>Nn8W_65=mlm0bqWySDqWNZ0~{CGT^oz^%)2x z1!4i=VPo+;!2r-Vk#3|rT_l91k(3bW64;;a zR(e6|IHzXL<(co|oq67Qzssk9H;o4YLeeDT;o|=d9U_20gjNKW9G4ik5J@9XfTWTB zi^V}mVDtZ}fG7eau=Fo31R)6_M5O<{A_L;$6(R|4Ku7{C=qVBKfAHVuECh4{pAT9d z+HY+ukCdi|lYeY=lN^fR;o~5BUl1S(1~IU@opAU{CswZbJcqN!uwA6_^<)(p9RtCY zYg~~?Ev1^i(ttc%bu2J?YeH3utxZIHv|{_zWAd#^Iyy#ghDh4?YiW;)W^1sUz*85^ z$Oyq)pnnQy*O9{Hz>cY5lsAuQ3iBt2&D3>|-f2N}O$PX`FyEZPbP6FR;YkE5>Z#1_ zci>x%jKTS(!NsT6TC-XV44gTASvZ$YK@l$dLArTWY(=2)cqvhK{!fg`)yl%F7F$h= zA^!E%0aMB%<_+>@70#H`ti4}#Hl|wPmyzq950YY|z>CXfIrbGHzwmESt@blh``o2v2H|mXc!|^CNe9fp`o%(Z zbl>ac_;Ry16mMqgY8ezoe`0EOJDmNIx&WC_b)Zeh>Ghx|)kEy-c%{X6zg+3aLK=p2 zeHqjnQ!iK;#CZC1j>N=k0H;LCFU<8jm0OT8_^4KeXujP*K=uCXe!r7zLfCO1IHu}O__9dPkH##IIwr({iLY+#2vF^i7HQY;T0e~ z25NU6e*u2S8$hP%!R*k?`O&#l?71S52owmb2_qEVSCnh~ARP*yHgcp;E4%FZZT1fx zG{XU5|7KLYrrZZQm*RNx484tyJwE(orW<}?h}X9XE?yW_RPrKM3M+^54KnRGB>d#+ z_vlU1bhw5ecbCV|XxD)Th~1vTBtntwggGX)IW8D}t6lGM{1265rihh`7{E{`ZUm8@ z0Zd9G^_}SR{7-u+;D^1P{S|0e-~gCvDT7ejB#p%I57aqtu9x_ z&mkvw3DSf6rb}xZoFxm5JXf)6 zmp{H*VvTkSDUNVP4*2L<5y|ga*ndtU$y-^zseEr7QD6MAE{Vum4+>{*;@!W-*WGjW zx>$~h(rR98X1zqw|GMucK1?@`Z9yAorFG4vSD;7UxAIf~a;BeWQue%(=fpb{h8ue&K!|n5&N`)V~_HSpZz?q+Uw=M}?btmMLfPzg= zqVkXMo%*Q@d*ae>BdF9p&ANmZ7y|rv*GRsESc^_(J(lHT((T0imM={cGW43j7Bl0Y z-8wd|JwA6A9MTKVM%+9Su({p0;th({bb=QsNM%qGdCoG;!G`Y;!2yjD=dTWPy|GXE zvLfPKt$CYjS!c&c0y2zksRr6zI?s}Rax3kx<-09c)4_DLqd60~m+5vA4X2a`U~xWm zEgn2deIK(H8?*`Q7W-U|dG317&opxk$CT4QuVBYY@L1Q65gg?gWH&b#shs!Ir4)PL zS@tg9zP3pkdgLROdC@Tw&-EkZHJ`OCfD+`G2{`yxr(US3w?U#hBL)9HAws#`9$FZs z-?M=#sz5BZb!)r{j484iDnqpS)-j||ZT0|$Zyicp)0J7m<$MzORf>eOm494mS8VLm z!-ozIUTMv=BH&iL>eX1gSQP++ctK=h=$in`goMPSx@n=g>&~PZRUgwW0s=#~y75PQ zCxZB31P(NCGK7)IHM1wZ5Xy1xf9@t|^=2l!yv2CV?qB6&*)~4Uyzh}GN{m_sGqGsm z{9XJst;x>!{Nc-05_O(wV~?C+yV%R`CbudwSG*(g2o4xAuXf2KRpN0{@V1APIU}ZZ z&jU)TU0m&l5Q^H|&*;{kG6ad?gGsun-p~(jexHa`spY7z zT9va#h|XxQz-+>O;11%flZl}D%H7p&|C{b+RY?WN(KPkf&9XYw3T|k^$7i!rc>qj< zNFoZ6l522`|L?{#HiGAFnHH21Eoau=WyOBx)8BS~FS0PU*#cevobTWq+2Q+VEsbBc z)Jf*1_6nJ)v~yXuU6?`&+IAX*HvK=fl4zVT=wBjT>sc1=vIkznyrt{iLO11cz?#Lmxc2nEEpzvuUSYUcO{*Ge^_JLa|8#4Uc5l~ zmukuO@3ESQDlDTKr+#9J*)mP4j3idPZ6R{6;j+#E=B`>9en+&y)tNX)P72AMOe|QD z`+ds&$Lwo`y|$Him|4b(35YmYX%ULu5Fw6LiR@dg9C=|~xrD@x8+p9#;$&deMe~AJ z9KIZzW!^L?e{&bJU@kt>JCW~X&xlk0fM9`YWqR5zG?SF)m#6ZPN|61pDkY`FQyod= z5ig3Aj2qvx!R7?ytujqHX%pPPF#M+kLy^Rf)1-%wpU*+wr5L@e?1Kfz_L%=h5706< za4am~=`txRzR#QYekVy(*peO9ib_nqh(<$CHAnyLIp)MYvtOi1ssa|XE&4kFSila_M``(xW}klueD)9 zz_n4;qf)`f*0|R-kJxzU&g{xSCc_wqGPA6rl*q%G`mG- zqIlo*rmX0m;c&0Du>iNK)QiFI)9%pshptM4sqmVkk15P+ z_)oRdmn!=qyx!VA>naeF;O)bLid>u{y$eTHI|M3kb8)vEv{&zQS@M zfJlgcJ+zOV6P)0I*x*q%M{1$H^~I9U1d$^_2D7}Yt(7Z`*5i_98HV%5*v_@fvyR;) z9UsCf{ltxrt5-^){`j-c5WHhL7@u99_}4DpCN8O0nL3y6(cVvs1e@O|sGr_D8LyCm zM1Fhbfc*XK4k3Qoc<}qhB>q`{!u)@e%!Z&(#Dw8Z^6|Hbn;FM_QQXyjik!jVi&!Y6zSS2_SVpjDHPGjr)EHw(gWD8LujA3O@5bphMtWM&3;1hGjj_VSMl<#j|86u8wib;w$ 
z%ZP_wPfTWMbEf?SkV%B!Q(d%Gi`i(a`6X+B_*eQ-rFe4Oz;e}2^^#`TQH18SiFJ#Z zHB1V~3pVPU*^s@^r3Mdnrjo)|h)hQ&#U@-{}{v-7u?U>U0e-R9usOy!AGZ zwTJ>Y;Mh1&Xt7HzR?NC@L29(j(X?BXA_&jv5(U zB^sIhbB=Mv1>oezQ=tW!cYjLw6?;XdFApn6-=l5h(vHqiMr)Os7Q*MG%z>dPb@l0I zLw4lix(b6m60k1K_PIZR*{BU6uTyED6POkii7M@M-`@q&M4KeC!{(Z*j*G_;I|vf9 z`@t!a67fEvyU?g~(kI#Ue2~blM^P!)FDeslI|oPNo#Q75q?c}7a#>Bk8=vcgX;xMi7@# z>!_lYnX{*i1V7i=`-?X!^>?1wzX+lMu;q?j^sh5JvdwtqQVlC1F{1#&b-}Z;QMd_< znmC?9TOYAnQ}+E`Wb_SZWJfTQ+wV6K3$#h%Gxo;1*u5j;i+|D>pufiNNa*z@qn}rP zT#y+C{d(l)yWsgQ?qR~$aoY+hZGZUOMOgJE8ce|bg9bQf?}jbzM*$8&B)#>9VWHa^QKE+bFqXYIF8#+-oUQC`6wG$=&!QVrvynNQn z6+mUvaQ-l`aCfr4B$irtY}67LgrAdvWrQK}&9&RRAkBVKlLOpj`+chjn9^t-hPONH43< z#W)N(1>#=sF@le1uLY{?J2u%mL3%NeFvYaR3mBLT)cM=4gt#FTqzK{!0f-0%De&=_ z32_MM>cPH@V?z&p!Q9?8YBl=PJ#@fp{(jgB34Vx5(yn?nmRnh~E)iJ|4_?hUh^+z*z(QPJn2eUjr=@+7ONTJ;KmPFkD$t_3l^R4itZz z$*}wUm#ZKU1b~LoLH@TD1f>O_dFY_FkvD-(G+>|@7=G0CAr&ZLM(Og~$y@|ojR?Wh z5WJ9XK}?^W1bLX&NlVi`6lyEY%2qBXF(*pql|TXN@YF1ZT~p#|L|2H}GT_;Ph-#q`_|kw=gwb)$R)i5lCYiKjl zbd4!%JX>_0VXEH8xFjAfa0sv0Q36uq3oWV{!KpVKj?2v_>OO9j4SL25*EMdSH-ZK z+{O@U-}9k1VqB8Hst9O9g?l}0H#zg@SZwVr-A+0R1s|Lc*kqLyJprA{rYRR!~td)&0Px|rMSz?Zj zYMa4-7eU{V7G!l$gk54!A2>nD>WNKXYTC;ExP;vvC*-VO~ANCSoP0)}WDKTUF>5F7? zNs~loQ$Ozwrr}y~60uu+;U~OOB|}Lt5W2Ux3XWZVoq7KA9p(w=DnNp78Sj&>k3~6g zjJCT?ChX_*q9nY-+uS2V&$-ylk;@56$~f-oAMIc_{`}-82^e zORaLjd3AA9*Pp`#wt6V;t>T_o3eKsy9E3a*2W{!EJO5W4wCP|xZOR!YoYmiSjQ@8E zi~$Hb3uVvX)&CO3f-{Z6xXYmStI(gMq`oNA{}jd0VaSTtIqOLD(09VOX6oZS+jlNUV;{llXHThPo#+4N`eROBnRl#Vi7b?q-zFw4 z`532S^DuC>Ds)s(eG$zX3LV*n8FyP%(|yQIQw#Af$;Up=;-fquG}cF?=uQh{Y>khP zkC?M*_TGq8lw-yMk9)B-qj6Trs83OgdX8lMeDPSP_Sf$93Aywm@ZbSmCwsSw8vwZ{ zJao=dN!%^46m%giV{dkB#(QP;ft-{2sBGlDH!Tkr6(YM zpx~Z8t<*8T1Fs{hwO15}LI1Ym)-?pO@cc9aHr05n*6CTv>o^Kg>NtQd+f?$Xz>&jO ziJQe__b7XX!0ZRH7H>3Dij%Yk*LqzcJQxc1G@FAjwRiN_7`IBII+jtQTzT&NUqz9^QzHj|Rbw0<}t> z_z~46n9A-mf8+Y(q@+RT&oIM0STlNSYp%)Ks>bDe^sw;tiQ4orp7Sdg`rc+v+2MjD zdZw6yWW;E@h0IN3oBE^>V^=Cm;b9lJuir@yzPzn;)E6=!8A>`TxE)#Ets;X;bKPxC zgRy29(r-E0hH1veM^AfH*+h#v35%*K&vApYJ;Kr2Ow6dzAh~b_Z=@n5gcpgF9->gY zTgcQ}4%v4`yNub0m(`|DdLq2T z1(dtVBCDLQp2G`pFKF>Dag&Rp;3yH!+J)e(qE~nk-uqt~>Y8I&X7e)N{W+PN9=2fh zgGC?GNI}z3rENV`gMkOaIUb49)4+HG}$CNVr7vcyfH8 z7iL#s=F9jr_-x`dn^~>03|W7=^eFIfwA62)XWTU~bk7^3jOP3$S|}?9#>)V305}0~ zfCm5!THtrYhPeP{4T5rBi8paJD7dy=Fp;JnG0}iUp;gcrG}EsS3k0MEfG_f9^-Bj{ r3=QjCh}WU@uze|v3vm|9b3n;kj)b?Anzw)Le6qpl#FoFq`0s#Xsf(cp%2`Yw2hW8Bt2LYgh2}=Zm2}dx32}3Y~1K$P- zDuzgg_YDCD2B3li&@h4n%mM)bFoFZc1_>&LNQUZju4r)&f z&2dtI$@s64Id_;%s=O&W5h}x?WkJvOc<>jpS7uHW1VMuRb^k0eHf zZI%BJ=22B}?XSz$t9QtR3TX8kfY&7)N<3QM*;pxmG9qG4|5pM$XOKPNVbpIcn{7rJ zn4Oq9((#jSa%^HRd-{g`3aOqn&SW^EXyD~K7PW|^id)iRt$${V|FeKV0Nh~f;J2B_ z2sAzq)`y+he!)}jQtIeLPX7O0oN|lX>a0r0YzdBRO*a-j&nb zyXe*fE(}%+&6Tn- zRU15FI8t~Fjj2mtYxZIUC&|GyK9(YrFr(GzHQKBmzOaBt%T20l3WskV1zy~LvQ6F^ z+f@zH5oSP3&@h4pTm}g$hDe6@4FLxMpn?TW1cC)gFoFd|FoFd^1`8^NNQUM$Gz3Mz(3hW8Bt3;_c$4g?6R8xnly_&N##0tf&Ef&|END#(^(cD0wB z0QDvJ>-5rs32x=svnL?-8wd4&Jubl}`!i5t8A}&1Tz*7PSNfY=^#XR>qjpyvNR6$i zU8MenH#rSRFSa}#?WIVUNf=atDiRX^Xx^RiH6#>v5EtE`$r*CX8~kD;G>;=dSdcLI zDt2}E%dYStwbwXXtgE8Swx`l@P_`tDl4R`@3sK6`Ju|H&j1cJV{*^<2P#iQRde{r! zUKhzOns;QBE4_-mpz{(QCn4p!VIaZ=e%+N34>IAwC$c*#cC0ZZiore9`sB#UBQ%!t zKG<8b! 
zYE5xut8zNsW_gKP=85jrU_pLd*DFEv)2CI?G1_Lg;ctPgHJDp4j0>~c8(NX}?~C&=%^!oU=I!(pg9E9B%+7Mhk_P8i{n;%djK z5SUSvExm~}is}QuPS$+vJD#t4kr#pzx+%P4zHyYXIHaL}4N^q{U-B#4CX0rouawn; zj^_@q<~zz9dOPH8K=H4hgQy^65@Q8>3Q`%n`x*o^H{n6THxga`U>vP^MTb^&^x7jp z%SqIWrZ=lUi4bOFZg*&NcJz0-&x9M4XA_X~c#A8tu2QvGzdfp`Ls*OJ1l$lke=0y~ zagn;PL^8>L4lH&Y3jUXowbU|Fe^b=xof1K_hLHusjPoD{$ntru>II$@*dDN33LYQ# zi@OKg1+DS7S7!~SXJ-ywByBe5n=S|mG{ABI4ZoLW(& zC+k|3yRGyK*2xjL4>2*ATE=2q)HlxbLj5A71}n^eBd#N`?68gM)|66u+tu9zFLJxg zL{0=N?Dd@gaxPlM@m*HCxQp}GGbnI+Z0D+}P=P{LOT5(06-nt<$_XSSsDXPmlMwe( zvkp=6DZUR7t+={KKG}EF$2NBB9Hj}hfd-{X}x+;PwjJ7@ZO5#Az0t zK#EVP<=Ae<#839T!%t~~#qGXi7s?jwO0i_d`z;HWXXY09^Z53 zk%@d#bj|nOn@rqtp_=l+|DaYw6vp6Ov!vmXPm`$$E=N1-HpRG+Y?!9{ z^IR|uGE%kU3AWmgZpxk=pg1X4c>ePya$*dQZ0N$C02) z2--1-g9v@W`qmgbLB`{ z{F4(it~?rrEkExOg4>3w1CF{Z6c3|xGdJ6@uY~zg+nw$D?6Yi&(j+S3))H^=Tvj<| zeD1yMs4(MlByN)P{LayyJU%9Q#I{kPX`MP&yjCOMZJ8@v_wc#ao?MFP!D@G3ajo|1 z)X)A=w?D@mLz%Ee#G11#Nm-WP6-)POGUt-b3!biAZ?k9n_#z(4%~xgdP&{&z0Kl`P zU81oWh>Jvxt(4UY4uvM2tG8HwOU8@6ENVumdt|n!`wFRZuX1<#h269b@;@zo5OM%F zQH@j1^NKd~zL39GoD8wKgCJya>C^F7IPrDtLG`19QwB}Ms*RBe!Lt-iNr1Hcbx)}j zcO4J=>{5}UTqmspx3q-Y^8k9@?UbXmTixyp0>i!v$@?sf%{J5uQToHa>DfvzWT3yU zkwX7j`qILz_J^|Lhed}W1)mDYQ`SG?%a6RGD`)_vyF^A*T4jU7+99?TyygWFvgusx z#ISQ5AG~ z!p~}Dj}jW^QrXVzyBSjfciNH}i59E9mkX>a8h`gk@8MIQfE%iT-s)N5rxs9B(Fn7T zaHL`QQ?FFJPiIx~I(oY*n5liN4haoHf5F;Y2NLfe8akF?BPeN8J8B3Gj}!%5=1=+8 z;2h$NveZlk^J~Z6HPR9fr@iZ^Du<5tF4CC09OJQf{U#6jMco~whtY=VV3G9$KUaUm zTh;)81psR6e8um=$iWVVy&~Y2m5At-xhp|Q5vZvpV)>jy>~^EzutSQDE55a7$GSe@ zMtJG2Pj^+DqE^g`2-So))MDIP1wsx~U|_c)Uqa)+uYcY7zv2L=f^guZ6P$2T&mpJ& z?-V!;06YnmPN4jMiK2Tmc;91;_d<4$W|HbpGyne-g(E;1DuZzpYqt4N{1q}P2*QDS z7;i>Y+q#3q{La^tSivO4t^0*(>}=EBi)(K2FvCksPf~3}?}WeZx9Fr-NQGX5*Q*Rt z19y&}X1C6&_8UYR>{T?$JZ^24d^^kggY{dkW800qa(UJp;aSPM*BjUBvIBUGQyQ2! zqxiwV%l^x$*`HUIfMBdKq<=8l<})u3@_UAfZi9oap4s0N4xvXpTYbE{YPQ$pUu`Ql zErz_`?UHrHieDaCZO@Vt%$b+5v3wKPDrfY${FG{fTu-r``w}VR@Bx0r5wRtCJ-(*d z;0diE@$$0!SG^9}3%l~-K1~{dtvlzkgd{=sLV`V?pAeKjlHb1x@=S4;)izy`S=BR7mKDDfS>|z!QMv7&$K*#+nbzu$!8V z47%5q{RVHV2O0$zHXg>p3iE~6#k3#yz>uizp(wS_=2>|Y6vp0FyGV+|MI`x6blS|A zsr?@kI;#5i>W@+cRFB`06iQ;g@Z2MZ$*Hkh)Hdmw<85jk+t#BsCOSjpZc3bOxD|0*- zwgENq_zRJrJaac!cfY( z<*I^Q7uw$XiUgfIR>8Bs>M$N%q{P&;EX_|)f}cTPCgY*~s-feyaKX**hh#I;-Kzsa-!z%K9_bD~*(w_tJ8QV)HRi245KhUJqew0pmviIrKFrfw-T<<9;hbY^6?%+RbB!a~^ z>xG;pdf(bu=pQPOn1?$y>VRwZ;g_Y{a z{*1^BDTj2hXzJvV^#EPp)Gz=5 delta 2438 zcmV;133>Le6qpl#FoFq`0s#Xsf(cp%2`Yw2hW8Bt2LYgh2}=Zm2}dx32}3Y~1K$P- zDuzgg_YDCD2B3li&@h4n%mM)bFoFZc1_>&LNQU{4$JmO{9AnAxe z%pE9Du}mEHoN27Q=W*6s6Ro{so@{L#X}!H4N;*$z_hJAXG!zyA@0GM4$^|As$Ba^! 
z;XSa!%p$x+$x0J^$|SYjEn5BlS5W6Ibtw7Ok~>U*Iewul+*yTwq+)-Ot$S$rQzT2W z701@Qzw1GNSYyMHY=J@{3u%8ZuNI>%*65q#tD!VMqfj%w;ro)L+Gn8R?ye&)tsK|_ zy=t>GI}}%Rq{_x-`1rrkVSk7{n5=Q9eSEs}EL=|M<11ILj1-1u>KAY>#2Ap4Y!6%I z%V0xkxPC(9VOs9HDHGaDktpKK=DZeuT^$OBDZ5dB`j%-X(~EV@mj!hj3n#21)H0A1 znRnXL|LitM%vVg~R7o;k1E76rL@xvVA{A>ZquL$WENfz0c?``|y=RiACPC5i zKVk-dm&Xe*EGAvC3eJkEAn`{d-q7(=Kw<#f8wm?lnE19;?y2@o*_8>zTsE?CVcIOx zC>qd#F%Y*^{nYOMLT^`9maETFAz7|ML3&2I7-%>IxP(Bvg|`lWfY5dFNQNs@Dw)w> ze#3gH&U;Yz+hfH8$JT<7Kqpv8H5O+#kDw)$7LqFltH8Pn~Jd~N4}(3mI2a*oF#tp(v=g;n!4;Ew=PriKU& zb0{20!r08ZwA_{tI!@S9ycsk2XlL)v(W}_ZJ~t1cgT#}OkAHLkULrdU*1eWjebgy` z5LLb;y7L9I2jo4tN(7YdnCJxVL=B>4X+z?a-iN?Ajfj7AOa7 z*6#rD#-HuK&|l$33)p@*?60<6#lQ+N&8%E-j{*tA?tON3N&ku<738Grt^wSc$mC4G z$cD;p@mWytimb?L$Mam^s0A!jC(~PhRLuq)dZ1Sl9be$C6b6Z&zzw;mNmO{8RhQ@I z>h>*t{n|$s5}tXeH5&+Vui}_dVe)WLT`ijAJ#EatZwKdQ!IuV2%p+fcga28mcn|1c zyPb@{!B0)C;2C%Ix7$(E6`1EtlK$|fO<#p=NGTXuCPTDfy$zID@gx3eZI8HrxS97o zobON$DDHXY)i8nuTm}g$hDe6@4FLxMpn?TW1cC)gFoFd|FoFd^1`8^NNQUM$Gz3Mz(3hW8Bt3;_c$4g?6XOYnNQTJuE$0tf&Ef&|DK32U6pmoc+) z3GEu~2Kp!%Y?s5n$^hS7=@sy3N5NC(DIt;j(9g}Ylg={C1fRmvr~%${VYBaZo_MbVfD99!bE7NzR% z5Kd#Wou{d&?>gMzVCJS?oH=@0c6`BV_q@Uvq`8)!<=KaC;HD-_2qpG#Dw)(C zA9;5urF@31Xv9%=8qQ5M7I1`N1*Xt@qpY(S(kt3EXW>@8oz@G|29aC;3T5RJdB($h z5KaloUq8Ia!F}8BkRq&qYmN>7hdRM3xDl4jC?(q_5ofs8^rk_W5SI4E?ODyXzG?3fn0_m2KfskgZe~v_Qa5#s>aQVAu@m z6Jj?F5LVIZxHZxE1dc^+MlznCvV@a{L*3<(B*DG#kQOmhXiU(%39 zWcN1AX%|d9m~IT1-tz^m0>~f64u^_YFW?g|<;1;YZ|P7c;5EIXAg#*yxT_g3M5z~S zvq}PEn<&N$crVW7dhKjtO~hGWx67u?7Jvry@81Y%rf8xiC+#V$%mmUn4-Q|lFmutB zla#6i#+pZmJyCCep)E~0d)yzfW2alUl}en_4k}PZSCM>{OTtwN5~ZVj*_Lp;_`0z2 z(Xh>7U*KIFU0un1F0N!EAml{ z?|#Off5UFyQr);NU~pE!y%9H(KmZnfe61!fjb5C=BWY)Ud=_c2cOgzjDx89elDYdy zfy6zX6p$*%%8j`1`gN%gS^+fVvuguBpZi#T7DDwn;ZA~NL$h9r3zV-t9&`rB9=SwA zTjgXpFZ-ugz!n$oz-!UpChPrPIT0ruJN78EZAUME=a8`@!`~k&M95{U!0QmhfaDOL z1YuV)^Oj7|uA?|4TCt`M0J6hG+z3gOgxv!^ZzItdP5OxphuAJdyDH@@PqoRB<UAr>3A& zo{q{TC6&iE%c02Z_#^Uy#e!gUDl}S5KG@rlTa&2?E=L($fBS6PWQ#G?>=~AZ65$LL z|2{A=Fd;Ar1_dh)0|FWa00b20bFT$JkH{yYoH6LjuRN%@20#4-2;8v43FM87R00AB E018}lU;qFB diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/README.md b/libs/ssl-config/src/test/resources/certs/pem-utils/README.md index 28602ac097f78..576b34317bd0a 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/README.md +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/README.md @@ -3,14 +3,15 @@ The certificates in this directory have been generated using the following openssl configuration and commands. -OpenSSL Configuration File is located in this directory as -`openssl_config.cnf`. +OpenSSL Configuration File is `openssl_config.cnf`. The `alt_names` section provides the Subject Alternative Names for each certificate. This is necessary for testing with hostname verification enabled. - openssl req -new -x509 -extensions v3_req -out .cert -keyout .pem -days 1460 -config config.cnf +```bash + openssl req -new -x509 -extensions v3_req -out .cert -keyout .pem -days 1460 -config openssl_config.cnf +``` When prompted the password is always set to the value of <NAME>. @@ -18,13 +19,18 @@ Because we intend to import these certificates into a Java Keystore file, they certificate and private key must be combined in a PKCS12 certificate. + +```bash openssl pkcs12 -export -name -in .cert -inkey .pem -out .p12 +``` # Creating the Keystore We need to create a keystore from the created PKCS12 certificate. 
+```bash
 keytool -importkeystore -destkeystore <NAME>.jks -srckeystore <NAME>.p12 -srcstoretype pkcs12 -alias <NAME>
+```

 The keystore is now created and has the private/public key pair. You can
 import additional trusted certificates using `keytool -importcert`. When
@@ -35,91 +41,141 @@ keystore if necessary.

 `testnode-unprotected.pem` is simply the decrypted `testnode.pem`

+```bash
 openssl rsa -in testnode.pem -out testnode-unprotected.pem
+```

 `rsa_key_pkcs8_plain.pem` is the same plaintext key encoded in `PKCS#8`

+```bash
 openssl pkcs8 -topk8 -inform PEM -outform PEM -in testnode-unprotected.pem -out rsa_key_pkcs8_plain.pem -nocrypt
+```

 `testnode-aes{128,192,256}.pem` is the testnode.pem private key, encrypted
 with `AES-128`, `AES-192` and `AES-256` respectively, encoded in `PKCS#1`

+```bash
 openssl rsa -aes128 -in testnode-unprotected.pem -out testnode-aes128.pem
 openssl rsa -aes192 -in testnode-unprotected.pem -out testnode-aes192.pem
 openssl rsa -aes256 -in testnode-unprotected.pem -out testnode-aes256.pem
+```

-Adding `DSA` and `EC` Keys to the Keystore
+# Adding `DSA` and `EC` Keys to the Keystore

+```bash
 keytool -genkeypair -keyalg DSA -alias testnode_dsa -keystore testnode.jks -storepass testnode \
-    -keypass testnode -validity 10000 -keysize 1024 -dname "CN=OpenSearch Test Node" \
+    -keypass testnode -validity 10000 -keysize 2048 -dname "CN=OpenSearch Test Node" \
     -ext SAN=dns:localhost,dns:localhost.localdomain,dns:localhost4,dns:localhost4.localdomain4,dns:localhost6,dns:localhost6.localdomain6,ip:127.0.0.1,ip:0:0:0:0:0:0:0:1
 keytool -genkeypair -keyalg EC -alias testnode_ec -keystore testnode.jks -storepass testnode \
-    -keypass testnode -validity 10000 -keysize 256 -dname "CN=OpenSearch Test Node" \
+    -keypass testnode -validity 10000 -groupname secp256r1 -dname "CN=OpenSearch Test Node" \
     -ext SAN=dns:localhost,dns:localhost.localdomain,dns:localhost4,dns:localhost4.localdomain4,dns:localhost6,dns:localhost6.localdomain6,ip:127.0.0.1,ip:0:0:0:0:0:0:0:1
+```

-Exporting the `DSA` and `EC` private keys from the keystore
+# Export the `DSA` and `EC` private keys from `JKS` to `PKCS#12`

+```bash
 keytool -importkeystore -srckeystore testnode.jks -destkeystore dsa.p12 -deststoretype PKCS12 \
     -srcalias testnode_dsa -deststorepass testnode -destkeypass testnode
+
+ keytool -importkeystore -srckeystore testnode.jks -destkeystore ec.p12 -deststoretype PKCS12 \
+    -srcalias testnode_ec -deststorepass testnode -destkeypass testnode
+```
+
+# Export the `DSA` and `EC` private keys from `PKCS#12` keystore into `PKCS#8` format
+
+```bash
 openssl pkcs12 -in dsa.p12 -nodes -nocerts | openssl pkcs8 -topk8 -nocrypt -outform pem \
     -out dsa_key_pkcs8_plain.pem
-    keytool -importkeystore -srckeystore testnode.jks -destkeystore ec.p12 -deststoretype PKCS12 \
-    -srcalias testnode_ec -deststorepass testnode -destkeypass testnode
+    openssl pkcs12 -in dsa.p12 -nodes -nocerts | openssl pkcs8 -topk8 -outform pem \
+    -out dsa_key_pkcs8_encrypted.pem
 openssl pkcs12 -in ec.p12 -nodes -nocerts | openssl pkcs8 -topk8 -nocrypt -outform pem \
-    -out ec_key_pkcs8_plain.pem
+    -out ec_key_pkcs8_plain.pem

-Create `PKCS#8` encrypted key from the encrypted `PKCS#1` encoded
-`testnode.pem`
+    openssl pkcs12 -in ec.p12 -nodes -nocerts | openssl pkcs8 -topk8 -outform pem \
+    -out ec_key_pkcs8_encrypted.pem
+```

-    openssl pkcs8 -topk8 -inform PEM -outform PEM -in testnode.pem -out key_pkcs8_encrypted.pem

-    ssh-keygen -t ed25519 -f key_unsupported.pem 
+```bash + openssl pkcs12 -in dsa.p12 -nodes -nocerts | openssl dsa -out dsa_key_openssl_plain.pem + + openssl pkcs12 -in dsa.p12 -nodes -nocerts | openssl dsa -des3 -out dsa_key_openssl_encrypted.pem -Convert `prime256v1-key-noparam.pem` to `PKCS#8` format + openssl pkcs12 -in ec.p12 -nodes -nocerts | openssl ec -out ec_key_openssl_plain.pem - openssl pkcs8 -topk8 -in prime256v1-key-noparam.pem -nocrypt -out prime256v1-key-noparam-pkcs8.pem + openssl pkcs12 -in ec.p12 -nodes -nocerts | openssl ec -des3 -out ec_key_openssl_encrypted.pem +``` -Generate the keys and self-signed certificates in `nodes/self/` : +# Create SSH key +```bash + ssh-keygen -t ed25519 -f key_unsupported.pem +``` +# Generate the keys and self-signed certificates in `nodes/self/` : +```bash openssl req -newkey rsa:2048 -keyout n1.c1.key -x509 -days 3650 -subj "/CN=n1.c1" -reqexts SAN \ -extensions SAN -config <(cat /etc/ssl/openssl.cnf \ <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node1.cluster1")) -out n1.c1.crt +``` -Create a `CA` keypair for testing - +# Create a `CA` keypair for testing +```bash openssl req -newkey rsa:2048 -nodes -keyout ca.key -x509 -subj "/CN=certAuth" -days 10000 -out ca.crt +``` -Generate Certificates signed with our CA for testing - -  openssl req -new -newkey rsa:2048 -keyout n2.c2.key -reqexts SAN -extensions SAN \ +# Generate Certificates signed with our CA for testing +```bash + openssl req -new -newkey rsa:2048 -keyout n2.c2.key -reqexts SAN -extensions SAN \ -config <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node2.cluster2"))\ -out n2.c2.csr - openssl x509 -req -in n2.c2.csr -extensions SAN -CA ca.crt -CAkey ca.key -CAcreateserial \ -extfile <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node2.cluster2"))\ -out n2.c2.crt -days 10000 +``` # Generate EC keys using various curves for testing - +```bash openssl ecparam -list_curves +``` will list all the available curves in a given system. 
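To confirm that a specific curve is supported before generating a key (an illustrative check, assuming the `secp384r1` name from the commands below; any curve name can be substituted):

```bash
# Grep the curve list for one of the curves used below; no output means the
# local OpenSSL build does not support it.
openssl ecparam -list_curves | grep secp384r1
```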
For the purposes of the tests here, the following curves were used to generate ec keys named accordingly: - +```bash openssl ecparam -name secp256r1 -genkey -out private_secp256r1.pem openssl ecparam -name secp384r1 -genkey -out private_secp384r1.pem openssl ecparam -name secp521r1 -genkey -out private_secp521r1.pem +``` and the respective certificates - +```bash openssl req -x509 -extensions v3_req -key private_secp256r1.pem -out certificate_secp256r1.pem -days 1460 -config openssl_config.cnf openssl req -x509 -extensions v3_req -key private_secp384r1.pem -out certificate_secp384r1.pem -days 1460 -config openssl_config.cnf openssl req -x509 -extensions v3_req -key private_secp521r1.pem -out certificate_secp521r1.pem -days 1460 -config openssl_config.cnf +``` + +# Generate encrypted keys with `PBKDF2` standard + +## RSA PKCS#8 +```bash + openssl genrsa -out key-temp.pem 2048 + openssl pkcs8 -in key-temp.pem -topk8 -v2 aes-256-cbc -v2prf hmacWithSHA512 -out key_PKCS8_enc_pbkdf2.pem +``` + +## DSA +```bash + openssl genpkey -genparam -algorithm DSA -out param_temp.pem -pkeyopt pbits:2048 -pkeyopt qbits:224 -pkeyopt digest:SHA256 -pkeyopt gindex:1 -text + openssl genpkey -paramfile param_temp.pem -out key_DSA_enc_pbkdf2.pem -aes256 -pass stdin +``` + +## EC +```bash + openssl genpkey -algorithm EC -out key_EC_enc_pbkdf2.pem -pkeyopt ec_paramgen_curve:secp384r1 -pkeyopt ec_param_enc:named_curve -pass stdin +``` diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_encrypted.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_encrypted.pem index a251de23f4879..6dafbae6a7785 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_encrypted.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_encrypted.pem @@ -1,15 +1,23 @@ -----BEGIN DSA PRIVATE KEY----- Proc-Type: 4,ENCRYPTED -DEK-Info: DES-EDE3-CBC,BE9A0B63873F6B7A +DEK-Info: DES-EDE3-CBC,0DE7DF3D64FBE0C5 -lGSpJkwN0J9p+2Wm58706EYz6mmjgz7okjMtsR87GMIiK/wVwjKmyUa73QTVVs15 -N/EOySftBk3VUSPx9G1ZMxKpp3l/hvkIcsDDfCPAZFqwdQQJ8BEeF9jDd5ZoI6Yz -Yus1+X8A1OpX1O7PCZ08e2fLeVuEWg62/JQcNukuvL7AKm+qa1sda5/ktquv2eMZ -nbTiOE3Xe+uDsgABQdy1h4EsMEaMdE6QrWdxLGWDGcdzSzfltvnhmmsK2CQsV4e1 -huQeb8ylShJuIr+mgtKgUlIlJwSd7ka8hIdmGt1LO9+NZOPUGN04daQkETtfwsmu -YIYkh66CuLbT4nZny64Spa7AeINSmf9GA72/QtRSo3M7Khlw/95Lz24iKAy7/Lbt -AKYenSQeJtlNgWzPcDIeUrIzXXmAXHN5YGMg/7X0h7EGu5BxYbLydkBRvSkV9gzU -Ms6JD5aON10DQhjIUwUcBnhSnwPPpIVa2xf9mqytkcg+zDgr57ygZ9n4D+iv4jiC -ZJuFCFrgeqHrCEKRphWRckyhPo25ix9XXv7FmUw8jxb/3uTk93CS4Wv5LK4JkK6Z -AyF99S2kDqsE1u71qHJU2w== +sKlL1ZyhrDo/7CF2bVHPNJMZqbbfQ55ZAB+T2x63j1ssu3c9arMVFiNTm3gl29DX +6PtYopDglgZhK7YYLck5batMjSqwpl+lm6MgTDqzgZAMcCPl5KJd0ScuCP9nw/yE +uAzBBmhhHrxUtyGLZWX/RNq+pIv3rMs2MGrLjidJW4VkIXczEjoVbayzHqdOHzPa +GVeGfm68ykFO94KcJZWsGFQMCtm7DyYLNusC4P6O9hpYNsK09kqwqMiQFKUstIO7 +lyemCDCQa2wikO22wum6PgrSosIU2CoSo7AYgb7zpGOJdtQ15F6mC4+NOFY4Xk3d +N5ZB7vc/1Y4vqKBDMF9DANtK8LjYt13p9mVv2ZDH48qM9EadCtN26PeARlqwFBJv +hrVyVm7GOywY43XTVQqrF8MVDggunyGubEH1Endhh0PmVCL+hG9djQWLuCIUGON4 +6/2mA7dbyxzRi5qgC00BRjrpmti+vddVArFCoKvaOlELaGR1mSkpeKdfuI+WeZ6u +/GJ/tvG/4yWKoRxxsZ5JFj1njMVhgsgicaHV3r+jF4SjDkjDJ4TXcU1QgP0jnNEd +5O2Yn85MuUg99T6r/3lgW8WLelrRKuGocYDMi/huWaBwhA+FsB/5eCm0nwWgNpw5 +Z/aylu/XHqx9pE5veAzXajGkg0z3MbBp2Ig+q0XWxznbQZnMZSuLMlnIbmo77v86 +pAoLumTBaG8unmwPc3WDvyEC4/znx7aJwcLqMLwnDB7+qtNqG6OLzskARQ5+TqXk +6SFn2JX2EhJZ4X3yKqDs93haSlQlOxszEvoz9J94xtHdeTQ1EE8dFwSBU4UrCxmP +kTTTB/p9IfRXyXn7Bp00EfDPc+0+I0pZrnQLA3CLP8oxGngh7RBGE1BtjNU9mxCu 
+P2dn2lQBh1bB5u5Ggm7T87BEpWmTMaU7wrp5drrbzuS91OQf9UGRxVt0UZwApNBh +tqabqXourZ4NOERuy8WL9wFG8IAymSAEd7noVCXcv25SxBng2tyo9nI272Ufq1JM +ymn1Bf2aDDJsb/n17dAcfxwbnx6GdB0jEIoUaWMkSWh8FfjLpE29uUraVBBYTmd8 +TlkFmodWG8ctHpwDXSmQ80lcKC7lZ1M3NCjKwdKpcM+q8HG3VuFmrg== -----END DSA PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain.pem index a64642fc9ab0c..40290ff1c2b1d 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain.pem @@ -1,12 +1,20 @@ -----BEGIN DSA PRIVATE KEY----- -MIIBuwIBAAKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADDHj+AtlEmaUVdQCJR -+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gEexAiwk+7qdf+t8Yb -+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/IiAxmd0UgBxwIVAJdg -UI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4V7l5lK+7+jrqgvlX -TAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozIpuE8FnqLVHyNKOCj -rh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4VrlnwaSi2ZegHtVJWQB -TDv+z0kqAoGAd0xuuUUSAXsXaQ/dp9ThBTVzdVhGk6VAcWb403uMXUyXKsnCIAST -m6bVWKjNxO1EsP3Slyd5CwbqIRUBK5NjzdQP/hHGtEIbqtYKY1VZI7T91Lk8/Dc/ -p9Vgh27bPR8Yq8wPKU3EIJzYi0Nw8AxZf10yK+5tQ6pPUa3dH6lXt5oCFF1LyfuB -qBYh7hyIsfkb+cZoQ57t +MIIDTQIBAAKCAQEAj3k12bmq6b+r7Yh6z0lRtvMuxZ47rzcY6OrElh8+/TYG50NR +qcQYMzm4CefCrhxTm6dHW4XQEa24tHmHdUmEaVysDo8UszYIKKIv+icRCj1iqZNF +NAmg/mlsRlj4S90ggZw3CaAQV7GVrc0AIz26VIS2KR+dZI74g0SGd5ec7AS0NKas +LnXpmF3iPbApL8ERjJ/6nYGB5zONt5K3MNe540lZL2gJmHIVORXqPWuLRlPGM0WP +gDsypMLg8nKQJW5OP4o7CDihxFDk4YwaKaN9316hQ95LZv8EkD7VzxYj4VjUh8YI +6X8hHNgdyiPLbjgHZfgi40K+SEwFdjk5YBzWZwIdALr2lqaFePff3uf6Z8l3x4Xv +MrIzuuWAwLzVaV0CggEAFqZcWCBIUHBOdQKjl1cEDTTaOjR4wVTU5KXALSQu4E+W +5h5L0JBKvayPN+6x4J8xgtI8kEPLZC+IAEFg7fnKCbMgdqecMqYn8kc+kYebosTn +RL0ggVRMtVuALDaNH6g+1InpTg+gaI4yQopceMR4xo0FJ7ccmjq7CwvhLERoljnn +08502xAaZaorh/ZMaCbbPscvS1WZg0u07bAvfJDppJbTpV1TW+v8RdT2GfY/Pe27 +hzklwvIk4HcxKW2oh+weR0j4fvtf3rdUhDFrIjLe5VPdrwIRKw0fAtowlzIk/ieu +2oudSyki2bqL457Z4QOmPFKBC8aIt+LtQxbh7xfb3gKCAQBVB6bce7VXrIhB9hEE +jRlAUTm/Zezsl1CfaCjr+lejlxFybg5pkNQCvPsgpELnWXWz/8TXkbzAxSA3yGB0 +LSTp7gfucdFleJrGGZ94RTaIZFslDvk5HtFaZvjvUavyY3wCbMu+T1QUtfpQMQpP +qikplvg/2mzYhh3cMpdhFqj6EQcC12gHPPA7qC2jXnvsW1qqx0wtIxbBJvCqFqmA +gnOj/FoxqpTmMsMDG+8cwkOQ3PZTv1JbqVeJGFMvfsqb05SfZlO8XzXOvTm7Wexc +IXHTUTsXb36rH4tNpBqxCc+l1LOd3vXXPtxxBXsGDV2UeDOLWnMKp+FXj77vh0bc +W3aeAhw3xacY9KJHUobKmnlsyfgPhURZXWxg0U9oSzOr -----END DSA PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain_with_params.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain_with_params.pem index 0a2ea861b9b66..a57dbe80015c6 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain_with_params.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain_with_params.pem @@ -5,14 +5,22 @@ fexykg9Kxe/QBfDtcj3CEJNH/xoptJQVx3hi+0BPPK8+eUXTjwkQerGMwUD7UQak xuUS/22GakHZV5G/kCc= -----END DSA PARAMETERS----- -----BEGIN DSA PRIVATE KEY----- -MIIBuwIBAAKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADDHj+AtlEmaUVdQCJR -+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gEexAiwk+7qdf+t8Yb -+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/IiAxmd0UgBxwIVAJdg -UI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4V7l5lK+7+jrqgvlX -TAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozIpuE8FnqLVHyNKOCj -rh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4VrlnwaSi2ZegHtVJWQB 
-TDv+z0kqAoGAd0xuuUUSAXsXaQ/dp9ThBTVzdVhGk6VAcWb403uMXUyXKsnCIAST -m6bVWKjNxO1EsP3Slyd5CwbqIRUBK5NjzdQP/hHGtEIbqtYKY1VZI7T91Lk8/Dc/ -p9Vgh27bPR8Yq8wPKU3EIJzYi0Nw8AxZf10yK+5tQ6pPUa3dH6lXt5oCFF1LyfuB -qBYh7hyIsfkb+cZoQ57t +MIIDTQIBAAKCAQEAj3k12bmq6b+r7Yh6z0lRtvMuxZ47rzcY6OrElh8+/TYG50NR +qcQYMzm4CefCrhxTm6dHW4XQEa24tHmHdUmEaVysDo8UszYIKKIv+icRCj1iqZNF +NAmg/mlsRlj4S90ggZw3CaAQV7GVrc0AIz26VIS2KR+dZI74g0SGd5ec7AS0NKas +LnXpmF3iPbApL8ERjJ/6nYGB5zONt5K3MNe540lZL2gJmHIVORXqPWuLRlPGM0WP +gDsypMLg8nKQJW5OP4o7CDihxFDk4YwaKaN9316hQ95LZv8EkD7VzxYj4VjUh8YI +6X8hHNgdyiPLbjgHZfgi40K+SEwFdjk5YBzWZwIdALr2lqaFePff3uf6Z8l3x4Xv +MrIzuuWAwLzVaV0CggEAFqZcWCBIUHBOdQKjl1cEDTTaOjR4wVTU5KXALSQu4E+W +5h5L0JBKvayPN+6x4J8xgtI8kEPLZC+IAEFg7fnKCbMgdqecMqYn8kc+kYebosTn +RL0ggVRMtVuALDaNH6g+1InpTg+gaI4yQopceMR4xo0FJ7ccmjq7CwvhLERoljnn +08502xAaZaorh/ZMaCbbPscvS1WZg0u07bAvfJDppJbTpV1TW+v8RdT2GfY/Pe27 +hzklwvIk4HcxKW2oh+weR0j4fvtf3rdUhDFrIjLe5VPdrwIRKw0fAtowlzIk/ieu +2oudSyki2bqL457Z4QOmPFKBC8aIt+LtQxbh7xfb3gKCAQBVB6bce7VXrIhB9hEE +jRlAUTm/Zezsl1CfaCjr+lejlxFybg5pkNQCvPsgpELnWXWz/8TXkbzAxSA3yGB0 +LSTp7gfucdFleJrGGZ94RTaIZFslDvk5HtFaZvjvUavyY3wCbMu+T1QUtfpQMQpP +qikplvg/2mzYhh3cMpdhFqj6EQcC12gHPPA7qC2jXnvsW1qqx0wtIxbBJvCqFqmA +gnOj/FoxqpTmMsMDG+8cwkOQ3PZTv1JbqVeJGFMvfsqb05SfZlO8XzXOvTm7Wexc +IXHTUTsXb36rH4tNpBqxCc+l1LOd3vXXPtxxBXsGDV2UeDOLWnMKp+FXj77vh0bc +W3aeAhw3xacY9KJHUobKmnlsyfgPhURZXWxg0U9oSzOr -----END DSA PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_encrypted.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_encrypted.pem new file mode 100644 index 0000000000000..bd97ea336952d --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_encrypted.pem @@ -0,0 +1,18 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIC1TBfBgkqhkiG9w0BBQ0wUjAxBgkqhkiG9w0BBQwwJAQQZyhaVfFi46pW+9xj +VTztDAICCAAwDAYIKoZIhvcNAgkFADAdBglghkgBZQMEASoEEDmoN7JMaRafBZpK +ARWFoW4EggJwiqa8cBsGLJ/o3Q/54SR2CCAJ+UJbtylFwZ+GgvnKrzuqd2vSVSfm +mG/xC1h5hE6miYuZXpMZuNlCAeZi0odBVXzIMMkTXCC5ifufor4bb5EeMwQLder2 +NK4IW9QkOu8IzO/ohuT+xJwiWxnyItX3bh68GFDHJH+z0+WHILHNihoUBg+HZJZc +RDGSU9GATjcX4WMnDJUnaRVJ71umBZ35RJliKKm6oJYgEmbQpytd03paMttvYUD7 +zaRAZFBXXudNVV3GM/+KlJX4huyjKbaJOv99piUwrPr9WK9OqYd//tdU+TjJKZ4/ +8yMEmTfoEUJFtQTuJ7bp74EgrqtN2FtP4v7ZQ32Js/fL0TlS9SuxHY6XnyQAZm8A +C8rSql5nQD2RBfY+OZ8k2ixVUx2kNFBZS3GZds6aRX4AG5dFBajQOJ4sAQVHGLL5 +qB7xNblgL0tepApTQ9teD/O53fSMkbxEROxipG0ukiL2hMq4s1sMZzIHEq5U+wWs +HyqNIRBbrYv2zgE5TT6o+yszddtZcH5spRT22bmNGNrREWP9KMiCuOrfgcBEeLwa +KrWS+0cA0nzmGVxaw6tf5SzQHOy2t/+L93oDSBs/9uq89PpcMrYtW37EE0z9vcNO +PqDFfJOKNtaN45s2cG/iCpMOF5EaII86gCODgl+sUaITtNrOy5OROnsuJJV5Qlom +kRf2p6b1EF41UcF94dvYnMJrumeWfNltMhhDOHF0qiuL+iCwoMPD2M6VyWCypGLc +BNXNA6/pmw+o08blonOJ8grXQ3LWj6LmZdWkPorpwiepmzmKH9wIowSC1j+AddrG +a59Z6s9wFrIl +-----END ENCRYPTED PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_plain.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_plain.pem index fc5f17ce89897..d9f6e6108d227 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_plain.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_plain.pem @@ -1,9 +1,15 @@ -----BEGIN PRIVATE KEY----- -MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS -PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl -pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith -1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L 
-vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3 -zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo -g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUXUvJ+4GoFiHuHIix+Rv5xmhDnu0= +MIICXAIBADCCAjUGByqGSM44BAEwggIoAoIBAQCPeTXZuarpv6vtiHrPSVG28y7F +njuvNxjo6sSWHz79NgbnQ1GpxBgzObgJ58KuHFObp0dbhdARrbi0eYd1SYRpXKwO +jxSzNggooi/6JxEKPWKpk0U0CaD+aWxGWPhL3SCBnDcJoBBXsZWtzQAjPbpUhLYp +H51kjviDRIZ3l5zsBLQ0pqwudemYXeI9sCkvwRGMn/qdgYHnM423krcw17njSVkv +aAmYchU5Feo9a4tGU8YzRY+AOzKkwuDycpAlbk4/ijsIOKHEUOThjBopo33fXqFD +3ktm/wSQPtXPFiPhWNSHxgjpfyEc2B3KI8tuOAdl+CLjQr5ITAV2OTlgHNZnAh0A +uvaWpoV499/e5/pnyXfHhe8ysjO65YDAvNVpXQKCAQAWplxYIEhQcE51AqOXVwQN +NNo6NHjBVNTkpcAtJC7gT5bmHkvQkEq9rI837rHgnzGC0jyQQ8tkL4gAQWDt+coJ +syB2p5wypifyRz6Rh5uixOdEvSCBVEy1W4AsNo0fqD7UielOD6BojjJCilx4xHjG +jQUntxyaOrsLC+EsRGiWOefTznTbEBplqiuH9kxoJts+xy9LVZmDS7TtsC98kOmk +ltOlXVNb6/xF1PYZ9j897buHOSXC8iTgdzEpbaiH7B5HSPh++1/et1SEMWsiMt7l +U92vAhErDR8C2jCXMiT+J67ai51LKSLZuovjntnhA6Y8UoELxoi34u1DFuHvF9ve +BB4CHDfFpxj0okdShsqaeWzJ+A+FRFldbGDRT2hLM6s= -----END PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_encrypted.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_encrypted.pem index 69dfde4b3c502..374467e05e280 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_encrypted.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_encrypted.pem @@ -1,7 +1,8 @@ -----BEGIN EC PRIVATE KEY----- Proc-Type: 4,ENCRYPTED -DEK-Info: AES-128-CBC,692E4272CB077E56A0D4772B323EFB14 +DEK-Info: DES-EDE3-CBC,0E2911A50F45B630 -BXvDiK0ulUFKw1fDq5TMVb9gAXCeWCGUGOg/+A65aaxd1zU+aR2dxhCGXjsiLzRn -YFSZR2J/L7YP1qvWC7f0NQ== +msSD9vAzUme59T7C1AL9XVLlcjnEEsA5v5fKvIr39GyJ0WeWTz7OaygM67xlkjGr +zBCabxgE4qL4Ydra8kEUZAbIYmdXs0kHBFlu2UFv8yltVfoWa8FR3VPEBrpq99L2 +NTuiWUEo9wvfLj7h4DiD5o3ejbMyomx8+V4uzWpCHbk= -----END EC PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain.pem index e1d0a6a8319c0..e8009c1d2d520 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain.pem @@ -1,4 +1,5 @@ -----BEGIN EC PRIVATE KEY----- -MDECAQEEILEXCgqp9wZqKVmG6HTESPeCyx2O4TDoFqyILz7OGocEoAoGCCqGSM49 -AwEH +MHcCAQEEILEXCgqp9wZqKVmG6HTESPeCyx2O4TDoFqyILz7OGocEoAoGCCqGSM49 +AwEHoUQDQgAE7mUZVxp/0TnDu8hSSedG9tGL4Fd1PhaUcdJ8f8ooFo+sYhDCp1m2 +1JzNJihfHNxhxpOYPDlz52yvero+raTAeQ== -----END EC PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain_with_params.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain_with_params.pem index 2ad57473236b3..c5bed51ef1f86 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain_with_params.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain_with_params.pem @@ -1,7 +1,8 @@ -----BEGIN EC PARAMETERS----- -Notvalidbutnotparsed +BggqhkjOPQMBBw== -----END EC PARAMETERS----- -----BEGIN EC PRIVATE KEY----- -MDECAQEEILEXCgqp9wZqKVmG6HTESPeCyx2O4TDoFqyILz7OGocEoAoGCCqGSM49 -AwEH +MHcCAQEEILEXCgqp9wZqKVmG6HTESPeCyx2O4TDoFqyILz7OGocEoAoGCCqGSM49 +AwEHoUQDQgAE7mUZVxp/0TnDu8hSSedG9tGL4Fd1PhaUcdJ8f8ooFo+sYhDCp1m2 +1JzNJihfHNxhxpOYPDlz52yvero+raTAeQ== -----END EC PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_pkcs8_encrypted.pem 
b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_pkcs8_encrypted.pem new file mode 100644 index 0000000000000..bfef68d57a722 --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_pkcs8_encrypted.pem @@ -0,0 +1,6 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIGzMF8GCSqGSIb3DQEFDTBSMDEGCSqGSIb3DQEFDDAkBBBJgd9ei6iSF+3O6nhk +A/CTAgIIADAMBggqhkiG9w0CCQUAMB0GCWCGSAFlAwQBKgQQezbgAPm2wh2vFE6l +bGKePwRQZub5Evev8F/53CGRXhF0sdL+i/2zCJcmqrauwPr6VtgQdXmBlJcur3ft +4PDXCe1R+3jhk56gmOBDjnOepPnWge62lKO/nfff6lpgr/uXUe0= +-----END ENCRYPTED PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/key_DSA_enc_pbkdf2.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/key_DSA_enc_pbkdf2.pem new file mode 100644 index 0000000000000..bb1655d2e9548 --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/key_DSA_enc_pbkdf2.pem @@ -0,0 +1,18 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIC1TBfBgkqhkiG9w0BBQ0wUjAxBgkqhkiG9w0BBQwwJAQQ9WLcmXfK4mQgb8z0 +VEFgnAICCAAwDAYIKoZIhvcNAgkFADAdBglghkgBZQMEASoEEGUh9m77oFyis8j5 +VedmDqIEggJwymDZJmHaNgIiJAI/psd+hR4n03oMwUaV72DmQewEdMhI2sEy36WU +Pup7X8VmRLb4tyiSiEUlh8FIX3cMpQ11e1j/lwW7wF+W3Qb6CHcMu8FCz3LN/CS4 +M+sQttfXiHh70qZvRx0SNaJo8A+e8HRGmYrbz6VqdlslSdB4fDT8Igls45rDZbch +LJlHQfy9XQSgCFR6J+6/6Q8GyW07+WnkuYnbixN8ZdZ4jPE5mrZYMMQrQY0l4ThG +vpb7U6VnWepDnXgeNWZTjHVLSAx3bbLUpbwotJnZISyTlRCxFSnunrRIkgaWPNMr +qE78FfE8I8Y/3Ft3AURgM+o/AvgyNCNM9g6DCqjaYpuaK0aJpdvaez9BiiANosBq +Powto+vuaDyYVIEhZ+GbokkvXx9muzvyA3KpqN1dg18au7Mqpkrenrw7Z5J8TnS2 +Pv686vSxCmisInC7c7uQYVxhze7fYMDUsyvWNPNUUrYnqrVtZtjD+VjkuZHJrBnL +haz5xQ0cw7pPY9r8R1y5jxMCVKxMBvbOsQJ+MBqGXseYmeB8qBBMYVdC+bNdEzga +rWD6FCX/k0PH2nP6KaU3qWLh3ueEtwTh0KO4yXgKyiLzF1KXoF93+4i9hX2w+t/W +Y5jgNErriqrW5WOQFDrSlVmMx1dLNFzM1cB7TKygZrzytULAYAg/0el8Gjbw7nKP +HInVUFKWhpNipEhDCGnGKoBvSz88AYAHS2I4fnFg3AfZCWEkkKJg++Y4Wip4+KTC +XjECqMqv6hwNbvMf3JkmqTPZVh8MtLIAiR1rUIWdZMq18+4vnHtW0FXzLb2nYn3u +ZrtXtOGxpBUY +-----END ENCRYPTED PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/key_EC_enc_pbkdf2.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/key_EC_enc_pbkdf2.pem new file mode 100644 index 0000000000000..b851058d17217 --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/key_EC_enc_pbkdf2.pem @@ -0,0 +1,6 @@ +-----BEGIN PRIVATE KEY----- +MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDA0+sj4ekT4h5OgmLaj +idCmLthqOUDdUNf67bBLjRSapUedsBIqSCx2u5E9ca2uGXKhZANiAAS6mhP+8zyk +CYIaOgF35O1KeRxrPsvWfm8tb5+KjuepPI+WR33xiBQcnYfeNrYMgP000Ifk8gfS +mv5aCHa5dBdgTzixsupMng0R8/jLPtS73Fzhi6G+KlRIe58c0xcVB5o= +-----END PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/key_PKCS8_enc_pbkdf2.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/key_PKCS8_enc_pbkdf2.pem new file mode 100644 index 0000000000000..445d50f1cafe2 --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/key_PKCS8_enc_pbkdf2.pem @@ -0,0 +1,30 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFNTBfBgkqhkiG9w0BBQ0wUjAxBgkqhkiG9w0BBQwwJAQQ86A6gbRa4DZIX+cz +TSf/DAICCAAwDAYIKoZIhvcNAgsFADAdBglghkgBZQMEASoEEABJ5byRdWBpd1Ho +U/5ukYAEggTQoJkyyzwsns3QvYy4hIuwge7G867QPSCnHXhKInOYNDgbTnf36ia/ +eO5PELfEW0sW6ZZt/D9h28vssT0RI4PTyCQCv3DaVym6f9JbmnfvJePlaWkheieN +j2Y1gth4fEFWKQK6Px3hkZCCjc1LGrSSKoqy3YhWlxbjrj0UfCpF60MY0TLcegZ1 +Zdl4HVjROcDpSBC/OyWb9LXtyUM5NJVEjHqr138iP/S/qtkn7kovJEVqUSIZd2T9 +BQwzCDzZD8Rl3W/ivZnCn/3lHkDl2JgQ9gVXrk1QhtKy0XF8z1lrKbYPkCL4nXR7 +2qOScFSvF/JjbmhxlnfjyrpCv4ckcvT/+KFvbNQP1p8/OFfIsapG6wTz2XGcwgA/ 
+c4uxrnB/110KO2m1zexsasxRTfvyHaTIPHl6NNh565cjieqdvp5KbzZBs9eJA19e +NTeLVbXYZA5Ols0FF9cG5eeU7NPVFVMS7UILHnq9v+i1eKO1VPUWmCZhR8Sje0M2 +DpzSnQmrErVaH/lbZ9ZOklFhpL+UvW+g8IBSLLdCo+MlyOr/Ydr0HiADBb5zSiUo +iWOrzgA9lLDG7VSHrpTU0I+PE+QctLVTPX2f+S+/pErnQ7Y+DE+OOsM37jGt8Zsi +r+XcxxTZUmiakr6fUDEVG0NxbErTRgpHdSoT3RFgcs37MlrC88JbOs1cOiwma7/e +56gqx/3uHJWyPKjVC/RUfIqsSpTx1EjqHeGYnJ9DTW+Yft+d7/HEZOr0Nl+3Qmoa +b6Bxw+5c6Of7HYhEKoi37l7O1//bmrs0pURPWPmawPZtlfwd03ifFTZDOvn8cKEL +TUFHBYd3V8kNmqRI/oUq28gk7uFd0Wby4epXcSVgSX2hAloSMYGfzcUU5u38v18J +JxYgg3DyJAMH3V/GHV98XU0zscbaTKreKMUaXduDS3ktk9maq6Mne/fpI/ZZP7pr +C7c1RJWKbSSdwAchQCMcIHUSZjA0iI7dIde9VP8e1DlErdWch3i7wdJQV4YqMM8v +3sR3fV31vkZcSUDRCcBJPlNd/j6+AaIU0zVt3yWUUSCExSAOCybrlu0JPCSjjyOu +Kkp0xEa6xt240QA+PyeUl7aov1wKZ1P95aek0y1AJy9SmcBUwBBVaeG+ETO/C5gv +g6VqjG18BX6ulzJsOsLnQCxvbQajB/eF7dvex2OzU+jPUPuvZ1IRu4SHw88eyGz6 +r8RzQ1d7sCr+kV6pWXrEaNnwyFhOhwdNMxaYwSUItrfb3+4jPDoHa/di0sJ4Dkr/ +UVuqnc7TAdW9x+PTtUWMQfaX7S8o6XDNXkhcWznhNP7OmkQpT2K5kkaGfLeHKRbz +7NHCwRXEm49ZPfDCnI9kddnejU60vDHW1uBGH2S5kn71noAe7R07s9qeKW50eLOf +Pe9BlOPb205gnibRYjjj0pUZ1YJwD4rkiXaX/fXHkPpgpyUEbw3tAZW+FqXUZSaW +TwAj41oXms0VoaUi/TcvsIDjnldVvZ0MkHUwMtfOvHb/lbrafHKoTHIuBbRAHSNf +uQXvBDwiq2uv3v0EZdz4mouqcp8aNZmunVHu7c22HCaf4s608BIq4FBqrq6XRdqo +cAOcq+WGk+F/helMKaRWo737062tq3dlhtRpGLXbZUcYThUNY4SjR6Q= +-----END ENCRYPTED PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/testnode.jks b/libs/ssl-config/src/test/resources/certs/pem-utils/testnode.jks index ebe6146124e8fd607e46a1a3129bdf9b4de0370d..1502bbad4eaedce7194d7e28acfe87e6b86c29b4 100644 GIT binary patch delta 6649 zcma)=byQW`zQ_08bax1wlI~4+gLH#*w@R0yvS5=+OG$T2mvq;rq@_VXq@-kvDDXJ< z-gn>e-1EkJf6TGQTyu`O#+Y+{zu(Vq#j7U!wvX){`9UCX!ef5n6%YvZ1_npHg5jcLbE1P$!5k1G zFc=k%`o+k1p~IH=y`U+C1@oHVIs`BL8dvUBQ#{29SfjIVOF-m}{oLZq#gAX05?}ho zL7WG5&IGCZ8)<7}CpY4wQhd7ciSl0>1_3+5U!j~BIyVwR33OerIN7pArL`S_1xmSp$JB{qOJ+5pdn zf|fAPs)1FjpXvavg}RQSKb`%nue~cIMP?6)I3b>S5~du|t%E5U&;nPGQ`!kX&>B`~ z_Bb6^VtzCw0}{hg1ZUvh?GR5Po1_zrwPy7-jsEE$d5_aEXCKFn|u*}72zHlA<4lr1^*{H$QGGNM+B$Xwt*E7%NosdQP zpf?NYT0(%vW!q}F;=D>OP*s8nLMNeAssK@_`Usi08mTi5lr2Al)2lnqF1EbGZXhmn zK!h?~h_GE5r?Z+v~+)*0~m7h)&a+5Zjl14+UIZNWhv`hmBqD$;>lYIS<=yh9o zv4Y*Mk%Egk>J;Uf^x-3eLKJqG^!BB-h3-%9t#hkO*E*AE3*sZBiT3=Ok9Hq2vc68p z3NmMJuDt$!|7PP9mEoeuN`VeZC(pKvM@OXjmXNIj`fwL~eAbs)RqXc&E~v|E?{FcL zl$8pW%8^TkTHQPV;3iLAw()sYQ|r~bBSRJDhs4p7Srb@g$B;}qMC{8R?$m>N78k06 z>FxeOtc(vRn;=|i@HkKQSM~2(>)S)*zCtb)ELte#;7QAM*1phVG?5UhJ#RetchN#D zdReflhr<@b`nSsb-B*LBY-|mt(3PGMdgm^+{dn<^@$Fn-W9nN`F+n-aB25 z!ptWkR6be)D68H+Ps{e2d;tSZ4IG(L3|8GCsxZcQwedLSl>VwC$Jh2|9%j%CM7tbe zF4>c=fjCggGmO=^j{7S8X5T%?kkT(JB9u4^rf7yV5}vi^vlZ~Y#A4%6=1QqXCjm+x zBX$j~J|eh)#t@YO;TWka^5!_COTUDPaSDm?CIa_+@p`lwlJY``zWm&P)z zxpuV%pkrxz7gFmD6lTBj`=(6>&CANzvol|`4s2lo$xNio))85QIH_ecZyzAD;{-Gp zdVDbYDDXr49-8JC`%XX6i#iIiM0~q*PcR70m`4aE1_Gfp!{8|OFgPS57X<=_KuGCT zJlwkVCSolqbq8W}>4co*FUeqd=vW-#D&gPpz+fCS5R8tG21bpJVu*@@ORDYR;HK|j zBjaV~OmA>!!Sq^p9t@+Uz~dK$iSmi@3GwmsiI`I03H;F&`B(QpS4eE8gaOSuEvJ{$ zC+-l;x5O7&(7+qS`r5d?kQokRcIXFRZNCLv<<+b0z17p5Z^)`vwaxzg%&SaFYO%ON z3IB$-$c4&zJJ&f^#Ra1*`6F)yO&Ju~wO7UDEwX&- zK)*{gQ*I#ItSZGCDeuCo&fH_pJ-FcI&hLWQq8vK>^|9CJ%EbTpr`*r}beTuzL4FlI zzbbcix%i)bj{bV=9{qN)pkS>oRa5xMDrWVnlEnK_h4Qp_)I?9ZsWVnB&A&rXz#woz z@Ml=?7L4X@hiULp$x%sT^mj%oIN*_aOB#!SC}(fu*Zh)M|~5BmEF1b<8* z0fTQ*(O@Wdk3NAm*ISF_x{96mef5d% zb7ItwZ5#r}55{`GKd5?5ke}seEE~bWT$sG#N)KdbQs<~eSOE1@Q5Xj!<3G=mQn|E^ zP+WT@6icd~yA-_Kqi6q>Fw@3$6v5@RN!gX6if=~sgUtg%eLtyvcGe*RRVvxF=yC`TgnEsUmI^htS~ z1RC%pSqHUSl7Ocz|G56d|E2{8yWdcOPx5a30v3Gs7g7k`B(3QhL~0d=Mr1S(d{N}@ 
zo0VOWtvt5Urf*}-NN|*+<}F=R#w>?-hG^g0c{16bsJR`LWKi{pO?5wo76uS)lD+Oy zF9K#IJx=aJ>pxz7NHd&e8c8Ah5Zg|qA5$E1ERj@IJD;|5CUNyF?^Cy>r0Slmm%sd+9v&d-sJVGh*mASmS!XPA&eMS=XM zs2yzn>I%T0Z9(XFTM#n+zgO^s5u#)K{1CByhB2LfV~Xk5;zMDr zEm~W*uaS*MbbdK1J=?Zoz8RI*$zh=KJO?^ML57k3ACgRfHsUTy#0mw!V8*(pRVb#P zI;Z|kl7Bma;BPdU2>fXeQJg?9dkBojRVf`{L#aAH`4NU^8Q=qOt~)kch}yQdV&_;< z>(muaGVec-HI?sP)m5>7ln#f$FwCl*NyZ@gZalo^bJ!Zg$*EZZ^>~%u792)u)r8|9 z5gamg3*M_hJg<=wZ}LLae;hULKU4tx0nPT_Hg~a*Op1b6eFxatcSoQ87X`o}k;!&- zAsmj8qA;-7PU*FJwpxy;(S6)>F(94+^zFc&F>`M5aU*j9QE3(?(W~WwDOQbmfb~>#joEs|cknkRdKL zZe68-NBpOvw^F*dOQBY~d>b!sQ4O^s&Wx`6c$N=_jL7pC1hc=!*>Fj(NM;wX~uALUwso zD)3KL&d{zFHanis4=<}%lAL_zE7nK>t=bL&&VdhOz)u0!CAV8dm&_=6EQ5+}4E~!} zff*eisYCQ>L1hFX=M|e$TC#ZH$OTu`+4XqAaU=!%`V&E1YYArI_ffW%_t;!`x#JZ=WpxUYDLe488s=|X&^-RJFEn!qlp0fs(GRbD>CU%(U*u)}B4qDU?ZY&2{iI6N z=B9EG>d52D6N0tO*|aYY>E-4Gq7$@-*eD%>4ch%kWT?_oDSz&=P*sI1i+SsXK zIOtf`;VNJUl;16i!%bDdylo-TLsn6C{Z>G`#@CrzTsJfuWG1^`8)hL(t2S`wtU_zRw8UE z+p`S@yA)A&TMtshNkU4T8n* zczpNXT6~hKVdw23iq5&p3I>9A1!9`tG{Y8Jwp2}cow43}k%^J*OFfQypg$;}6c;4H zU)Z~G<`vKErX}-Q0#h`vUuSzWnu@*P<+^2_!kW6{Eo!{<+$1^UrtxgVAm$#x0R2dV zU>tQ5#c;U1za`(PqJicoCT0ztcY@G>T9GNmVNbuWukBtrjrb3R9RvJz08q#B{(2`-5XKJ77>-7f0pX5!hI7 zKKQg`yuzqGZzM?0`sfD&tDfGkB$L0G^-NhhAtEEce^;@CKG;B`(JY8dIEJoNdiK?x z7D29aEWi9~^QZk!2Z0zg)@EqBcpDDRCYPdfirDVVgwHY|l|wma1X9D9Wm$TNSKTlz zM7`mthWADMUSY~ik%hk5!7rs*RF*pX{l(U(t?b236rfmAFBoSqviZ}I0{P}K z#qt^o9E=9~V=D|XidUXD8oi5@LlU9J+>_N6>vA|eOx4M8W;?htDo7>ra>I9tpM|vl zq%V};HSw*#?O#Y}?;fBR8MgM}X4?CT@zrz6;c5Edy_}~?!jbl7%=kaVXs1jaFOPLA z&g@=5o{V;C0|umxS2}#y+U4x*DVH)!PdCw=DcL!mx~_A11$AoaWS%*u>jQR?#sA~I%@avb&B zt%N2z#M(>_%^5srbR{U<11spFGz+O}u_lXV>$BD_WJm?+DGm$ih_lobxHc|V}I!BQM!KCl* z8k{l|Ylc@xv`9u`KO3oX;_Jj-PA9Hkmt5FUcsHCk7)0`KQoM`E-%yd0^nXuE`=2#3@0}<*gU9T3)?06`EAd~1>0Q1^ zDiYl6uJX2{jP?#aWL91spH62}8#oe%H?rjsNB1m2{Nx6*Nr5o;E54h4DAO!35kdI+ z{o-QqOwv1quSJdZXB9YkuC%Sb>Vd)iTjz_|>~ki#rU0|^L&;dZ%O0@|h3E0zoEpJ% zEW69b9=Oq+d?hubgcwg*-KeezBvt0Dyo1%ri{KknFjAk`XY6AZewOF7yE38H$x|D; zEh!1n&k5g+n2dF!TOv{s0e1KRsYAS+u=h|%uCB7s)w80n_iIKH2-ok2x0Mkmir2;` zO?q`S?ut@xaq$;SY;O;7pE_sz$3Pr@{1_yS6UXFJi$#RQ;K08%2vLaFrVF)n{)}kn z^`#6-ZLG48?Y~LGe|~`Lr!x15I4~7|{49riMune&nhe>L#h;Iby|vlMCzEGS=TdIj<~7^Mm(f8F$ak zPe>1LIWzs*PT3J18B(elMor#pceUomsffdB%0?sRZNF$>%QKv@DG1R_9GLqltAhi3Je_ty?k7Si*KL% z?aPt@Unv4K;C}4ULN-8J_`#cDBuJ2Bhp)qkHDV#M*755xfB?T_h6BUAEbcJ2mWui_ zosALD=x<4hMhJtW;oc=>>%XwxK=~J4%u-$*>gwDERg2yVZ?Njne}^XU*VO!Xz5iFR z|1;MM{?7G+{|~N*qbU4cWZvm-!LljO%%SHKd|z?!=H#arsS^2*08Luxgp5;GyLXh$ z%#WAI?trj$0;(xB5%v6;NS`)d=tQf`lJq2~&R~~UHh+t|`}61&R?L@P8+Lqwi1QBo z$33LWji2XmUM2Rer#d9PIrZ=6O#Ik+G4P`;`cUV>bF#LUi&^>54vfPq1uW^x@5rKmIp%PHBn!hd1E7VZ+gUaqS}r_ z%F+tt@ym%ZW~K&Y$DNg~arjWZSYN8QwBx*LNJpU2$OqmUcvyz?RO-Z4hRFm^3L0uH zYs#;!>x#bvlA}y;Z6WC#35qmp^;nmhdVP--Z4x8IO2*o}q1`r+ukaxamjO-VO{d|-&O(mu z0oIQkW~}rR2IV=}Z9Qe=Y(>DJcK7OH`aPbGm6?1q(}&z8K!nK$J1eqjR;HliuVs`? zarIYl2i-gyVIP*yY_!&n;xkmReeoHU7p95D*ktq0v@y&!yBZ^8V+~iMeNe-LApYUH z*j|o7lxsfk3j1IJ@zAGIQ>V35vZdDI#Lz@bwc?c6&%uGOrAUQD-}mDf{%%hQ+voQ>Mwad( z6aA0K1cUN8<}HTjbtm75b3gAypv#-sj6~~0AH2^MVy0Ciulyn&kZg!t(=(`E8y#O( zUZbZXhZ#M4+e*J8YKX4Wr`ku2McvMoQl? 
zwSI9L=y)MDK|`fYL1@}Ug?n6A?C;FW!hNsW9i3Hnv}o^J=4$J&>&?h2;Iln4VLUCU zzp-vM+xb6=RqbebrswtVjq}8*$PtUIm{$|!RKFFge>>r>R>k}452QrR-(pq<1e1h7 zRYK^GHf%EplpqHv0W~v}OvBla_Kw@P5cgc=Ay92Limw#sING6S@o$%t?839J$&kI9 uW?!D)Z&@n`nmx`eii4MAPi#ACSD%(LH@9Q=Go9lr{zS- z>dHVD%O9s)t$1Qw%95&Az+N>Qq(BCYHrQ6()@O{OzyDXx{fK^l;7x1RCm`d=KZ(*k zc@DIPdP;}`pKii_p%#6XfSR)vv~qqoDbA%qb8Hx% zJo8K0k4?N4%L#+(D;dc@ zjJgR_HFI{1@b&M1vaAh<&Wt6S<60YhSfJIG!s%80mPl69(gQaq3c|_CS{A$yArZMG zPoY0Uf&u!)b|w9oLAH#0sN_OD*s2~YO#7|}@QF2qG3K>aa(wItBv}1B`8C{62TvrN zKcV%Q&alXYEQ^K~0{6D_5ipi~CygeSz; zJCLUcCzoalmhZ34$A$$^UPId7ZnA88yLCE0iU0rr0RRP9E;TSY0009#FoFX(FoFW~ zpaTK{0s;ggJacL=4F(BdhDZTr0|WyC1pt#A0VR?B3llUmF*sTm4Ky_|FfcMOIW#jd zIFlOzCVzqfw=jYMECvTEhDgph1OYID0UrW^fdKt~Q-K|I5-C#ON}Mk2&TO3(T^Txq_nWgbVOk9Q6W$fdKd6poP|(J>JW_t6Vm?SGjqVue z>Vo-KOba~+f@e`#SB|>WSx@G12M~dPv`H3mBj`*h78AG62{Mexrr|smdW%$ijVR!w zu74gYv!->CZliw-`XM!9@%v(W0VF%jr18F}QILtKgWus=<(~$Ui)LPcdsQW50Zcpo z&q*o+frJ18fq-{RZn;Gg0ecr|58bEK;RQ8wby!A|r9g3J_|togT}+oM$-*E6lbfd1 zSg6g!?L@Hs(w8TB3kK>T6#*-gW6jhL{$dfvv_c!I)(T@)StGRl)VVzTH$SJ~lBQV-e5ldhPAzVKh9c HG#>GrQ0C_2 diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index a44e1004d93ad..00931848d0644 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -87,11 +87,6 @@ thirdPartyAudit.ignoreMissingClasses( 'org.apache.log.Logger', ) -forbiddenPatterns { - // PKCS#12 file are not UTF-8 - exclude '**/*.p12' -} - tasks.named("bundlePlugin").configure { dependsOn("copyParentJoinMetadata") dependsOn("copyTransportNetty4Metadata") diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java index 1123ae4623300..d0b0403874c7a 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java @@ -84,6 +84,7 @@ @SuppressForbidden(reason = "use http server") public class ReindexRestClientSslTests extends OpenSearchTestCase { + private static final String STRONG_PRIVATE_SECRET = "6!6428DQXwPpi7@$ggeg/="; private static HttpsServer server; private static Consumer handler = ignore -> {}; @@ -115,11 +116,10 @@ public static void shutdownHttpServer() { private static SSLContext buildServerSslContext() throws Exception { final SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); - final char[] password = "http-password".toCharArray(); final Path cert = PathUtils.get(ReindexRestClientSslTests.class.getResource("http/http.crt").toURI()); final Path key = PathUtils.get(ReindexRestClientSslTests.class.getResource("http/http.key").toURI()); - final X509ExtendedKeyManager keyManager = new PemKeyConfig(cert, key, password).createKeyManager(); + final X509ExtendedKeyManager keyManager = new PemKeyConfig(cert, key, STRONG_PRIVATE_SECRET.toCharArray()).createKeyManager(); final Path ca = PathUtils.get(ReindexRestClientSslTests.class.getResource("ca.pem").toURI()); final X509ExtendedTrustManager trustManager = new PemTrustConfig(Collections.singletonList(ca)).createTrustManager(); @@ -184,7 +184,7 @@ public void testClientPassesClientCertificate() throws IOException { .putList("reindex.ssl.certificate_authorities", ca.toString()) .put("reindex.ssl.certificate", cert) .put("reindex.ssl.key", key) - .put("reindex.ssl.key_passphrase", "client-password") + .put("reindex.ssl.key_passphrase", STRONG_PRIVATE_SECRET) .put("reindex.ssl.supported_protocols", 
"TLSv1.2") .build(); AtomicReference clientCertificates = new AtomicReference<>(); @@ -206,8 +206,8 @@ public void testClientPassesClientCertificate() throws IOException { assertThat(certs, Matchers.arrayWithSize(1)); assertThat(certs[0], Matchers.instanceOf(X509Certificate.class)); final X509Certificate clientCert = (X509Certificate) certs[0]; - assertThat(clientCert.getSubjectDN().getName(), Matchers.is("CN=client")); - assertThat(clientCert.getIssuerDN().getName(), Matchers.is("CN=Elastic Certificate Tool Autogenerated CA")); + assertThat(clientCert.getSubjectDN().getName(), Matchers.is("CN=localhost, OU=UNIT, O=ORG, L=TORONTO, ST=ONTARIO, C=CA")); + assertThat(clientCert.getIssuerDN().getName(), Matchers.is("CN=OpenSearch Test Node")); } } diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/README.md b/modules/reindex/src/test/resources/org/opensearch/index/reindex/README.md new file mode 100644 index 0000000000000..f2ff25d41a890 --- /dev/null +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/README.md @@ -0,0 +1,48 @@ +# generate self-signed CA key + cert +```bash +export KEY_PW='6!6428DQXwPpi7@$ggeg/=' +openssl genpkey -algorithm RSA -out ca.key -aes256 -pass pass:"$KEY_PW" +openssl req -x509 -key ca.key -sha256 -days 3650 -subj "/CN=OpenSearch Test Node" -passin pass:"$KEY_PW" \ + -addext "subjectAltName=DNS:localhost,DNS:localhost.localdomain,DNS:localhost4,DNS:localhost4.localdomain4,DNS:localhost6,DNS:localhost6.localdomain6,IP:127.0.0.1,IP:0:0:0:0:0:0:0:1" \ + -out ca.pem +``` +# generate client key + cert +```bash +export NAME='client' +openssl genpkey -algorithm RSA -out "$NAME".key -aes256 -pass pass:"$KEY_PW" +openssl req -new \ + -key "$NAME".key \ + -subj "/C=CA/ST=ONTARIO/L=TORONTO/O=ORG/OU=UNIT/CN=localhost" \ + -out "$NAME".csr \ + -passin pass:"$KEY_PW" +openssl x509 -req \ + -in "$NAME".csr \ + -CA ../ca.pem \ + -CAkey ../ca.key \ + -CAcreateserial \ + -out "$NAME".crt \ + -days 3650 \ + -sha256 \ + -passin pass:"$KEY_PW" +rm "$NAME".csr +``` +# repeat the same for server key + cert +```bash +export NAME='http' +openssl genpkey -algorithm RSA -out "$NAME".key -aes256 -pass pass:"$KEY_PW" +openssl req -new \ + -key "$NAME".key \ + -subj "/C=CA/ST=ONTARIO/L=TORONTO/O=ORG/OU=UNIT/CN=localhost" \ + -out "$NAME".csr \ + -passin pass:"$KEY_PW" +openssl x509 -req \ + -in "$NAME".csr \ + -CA ../ca.pem \ + -CAkey ../ca.key \ + -CAcreateserial \ + -out "$NAME".crt \ + -days 3650 \ + -sha256 \ + -passin pass:"$KEY_PW" +rm "$NAME".csr +``` diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/README.txt b/modules/reindex/src/test/resources/org/opensearch/index/reindex/README.txt deleted file mode 100644 index efd5e4c20ffd3..0000000000000 --- a/modules/reindex/src/test/resources/org/opensearch/index/reindex/README.txt +++ /dev/null @@ -1,16 +0,0 @@ -# ca.p12 - - -# ca.pem - -openssl pkcs12 -info -in ./ca.p12 -nokeys -out ca.pem -passin "pass:ca-password" - -# http.p12 - -unzip http.zip -rm http.zip - -# client.p12 - -unzip client.zip -rm client.zip diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.key b/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.key new file mode 100644 index 0000000000000..a04c18c994359 --- /dev/null +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.key @@ -0,0 +1,30 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFNTBfBgkqhkiG9w0BBQ0wUjAxBgkqhkiG9w0BBQwwJAQQ8TSOq343U8BV3rEt 
+vOpSPQICCAAwDAYIKoZIhvcNAgkFADAdBglghkgBZQMEASoEEFXKi3C3VJzsGiCw +Lh2zY40EggTQwtBoa+e+J/UAA/mVv50rVH7oqvs5t9wRfznrldPtUgTR7r06TxNB +DXN1spBSmJjrohC3RbEO4169YqCwAk2HsptENM3MV5A9EwTuXPVBW/ic2SDOwmiP +wvRRKUujjaYZTfVeVJi0LqnCtyv7/hc33MJ3IMeNefEwmYRH3u/ktp+NBXZPEp1G +sdbPLpCxUqtq8zE84ev+RyURbErWVvjI8ma20Hn2gACkQazYTSVMVMxvj4+m0oBd +hzQ54GjRypm6Tc+CkJXGbCp+3sCONUqKARZYo+oiL5wEdGTLOcCwaCZxVkftDZ4V +oGrHVlgFrYgADaOuokjMf178ymMJX1+kTYze/k/ajXHd8qBKRD1X49dDhrHjnlhV +2sGOTKk16fBXSoM/q4vfmBKkd+BxDcdbsDkLDdT266XBy9hdRnL6e3Qk6ag6i0dB +faJwyXHIhiS87nFLpYeXY47DABBvmKVqafdHJDab7GYmLb+2J33EbmQX+tMgKrI+ +l5FjPX0Lz6/c74M6jYGHhbii3fZKGzb9BwWCEG7eIMONfv7IoaP2HI/P5G1WheQ+ +Ocd4lsb+pCmy+tzQcB7+GtWX0sG4ugCTsKIofN9ZmkvdQsvQvjT/oubDtBXUMgIL +/6GpYr7f535wD8jp4qHjSNyiNf93XiepxUsKBh0xvcGRRfhEjrZhnDm8DYP014bL +HhWzPVUgQwDJMa92wzsqFpXCujhLDb3BzLZLCGWDUkDsPjX2hUzNRWw+nN0FEwkD +ezxZOpK7m/ZfZi0rI94oYpmanwLNH5tvwr7pKLJ2SAP2WTNYRtff7vgeKOmgDG97 +pSm49phrSdM/VbwWgoPHpGxn6De5mfp+52dz5sCZMP0tsYMa947z2VDAU9f7+AQL +V73HGQKu8eny2ofOvQiKMK7sVo9dDvf6O4fGUCZh55YmQYzNq1cYh5lgQgPJ/CDb +c2mUVhwPfd4gvmKzBQ+nxjo5Jbh0vJwqOxk0SMCwWqQW5+Y9mdcDseyJwL7iyiTd +xyN9rUdro86foF85Xja+MZ0hVW/q1xwrZSiunWuvg0uaGMdSuknn7skLnKrdbfIU +RocweZPetFxzCm7XeikCaKucoNLNSPjAKW13doZSOc4OxS4hXep211dGVvK43XwX +B6xp8WtquZaGk01J789H1XU/sz6AssuCrMvql0Gd/GeFz+Ql9dMd4bH2ZzjpRcWL +FMZvsxXzqp5zodsn/j26h+WKZYmLSnxvE+WjQHyECt1JgSyYD2I84CxKj9I5ezX7 +1PIc3/OPl14p+ni/lfx6UM5WmbrHcuLM5a2ml/9e+HQci2xDNflkCiRQ1jcXYSB4 +p5mAaxSPbC33mi7jvBtUF1Yk9CiIRW941pKhn5YSj4bEMs6h8tB4M9wfXn9HPe/X +0KdYFMzf5sc9nmDZt2A1EoZexYwMk56wVQ7gnekw9ECCs6OLUmXkAmKojvbNXG0C ++t0W3LSoFsMM6vnINVooK+dQgRLqXFe57HY8j7zTmFh69Kh3/Cv24gQ21xwPYB6y +A9AVrrxRUV4Nlqkw5A4kVKXRry9/xj5DGgZ4SI2rJZ3vhfD2jiLFnl+JBT/Cw2xL +NL32subXNGqY4ymnq1HSG3SO/Jgh21XZL8rl2kZ+QiT7QvRVFWefRdA= +-----END ENCRYPTED PRIVATE KEY----- diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.pem b/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.pem index ee758ca3e6370..615f00e468ae6 100644 --- a/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.pem +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.pem @@ -1,25 +1,22 @@ -Bag Attributes - friendlyName: ca - localKeyID: 54 69 6D 65 20 31 35 34 37 30 38 36 32 32 39 31 30 37 -subject=/CN=Elastic Certificate Tool Autogenerated CA -issuer=/CN=Elastic Certificate Tool Autogenerated CA -----BEGIN CERTIFICATE----- -MIIDSTCCAjGgAwIBAgIUacmv5ElKJ1cs9n61tEpy5KM3Dv0wDQYJKoZIhvcNAQEL -BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l -cmF0ZWQgQ0EwHhcNMTkwMTEwMDIxMDI5WhcNNDYwNTI3MDIxMDI5WjA0MTIwMAYD -VQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBDQTCC -ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ0rA35tPl0FN+BPk2YfmET9 -MvDWFLvfL2Z1aw1q1vnd12K9zumjN6veilHA2Iw/P4LG/mkQZvY4bDPgibRD7hbE -vwPoju4vr614tw60+FlkpO6HezYo2I3cni1//Gehhs5EW2P3g7Lw7UNCOAfcR2QQ -p/dtwXYWzXHY9jTevQSv2q/x5jWKZT4ltaQExzvXAcxRGqyWV6d5vol3KH/GpCSI -SQvRmRVNQGXhxi66MjCglGAM2oicd1qCUDCrljdFD/RQ1UzqIJRTXZQKOno1/Em9 -xR0Cd5KQapqttPusAO6uZblMO2Ru+XjCD6Y0o41eCDbkd0xA3/wgP3MD5n41yncC -AwEAAaNTMFEwHQYDVR0OBBYEFJTry9da5RZbbELYCaWVVFllSm8DMB8GA1UdIwQY -MBaAFJTry9da5RZbbELYCaWVVFllSm8DMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggEBADA6qhC35PwuL7LRddbhjjW8U/cCmG9m7AIvH6N+Mw/k76gt -tJkEDxztMHUG+A2IPyEcYm7MLr1D8xEQYsq0x4pzFcQnMSQDv4WTK35vRxMtaqwA -WZTyA+DibBknbaP1z3gNhR9A0TKx4cPagN3OYFvAi/24abf8qS6D/bcOiPDQ4oPb -DVhmhqt5zduDM+Xsf6d4nsA6sf9+4AzneaZKGAMgCXgo4mYeP7M4nMQk0L3ao9Ts -+Usr8WRxc4xHGyb09fsXWSz7ZmiJ6iXK2NvRUq46WCINLONLzNkx29WEKQpI3wh4 
-kyx6wF9lwBF06P1raFIBMeMOCkqDc+nj7A91PEA= +MIIDszCCApugAwIBAgIUOpUOL6Dz5+T+y+SIDknp8nOB2x4wDQYJKoZIhvcNAQEL +BQAwHzEdMBsGA1UEAwwUT3BlblNlYXJjaCBUZXN0IE5vZGUwHhcNMjQwODI3MTgy +MDE2WhcNMzQwODI1MTgyMDE2WjAfMR0wGwYDVQQDDBRPcGVuU2VhcmNoIFRlc3Qg +Tm9kZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK2bmzHyMB705hS2 +Vu02WaTz7iWU11aVlNwAEVWIpjarDsk1IeICYe2vtv7e9qAp5IAMC6y9Db4XAx6A +PKJHZ5XcrWKpJqanMUwMi7dJ7wLWauMlx4WdyWSdJ3KRVO0Xzdr6My6dV+LCiiYX +cQCFYzEQYX02kU8M8NZ3J9t5OK3MF8/f0gta5vMs/1akPJzTMYyLva+hcNyGC9pW +Ly0w2kWxqze00KjT8wnmUz3h6gxxRwwdocsyZ1AE635anRu2MuAo94sA8kwQdl6z +cKtTzlzbLmrBQzusnuQtJCKGzvH+uBGodFpQhi5JpYVbuSvqI1Lumg7RA524cb0t +OKnijBECAwEAAaOB5jCB4zAdBgNVHQ4EFgQU41fNVZMW0Kc5nmv53kKTINZT0CMw +HwYDVR0jBBgwFoAU41fNVZMW0Kc5nmv53kKTINZT0CMwDwYDVR0TAQH/BAUwAwEB +/zCBjwYDVR0RBIGHMIGEgglsb2NhbGhvc3SCFWxvY2FsaG9zdC5sb2NhbGRvbWFp +boIKbG9jYWxob3N0NIIXbG9jYWxob3N0NC5sb2NhbGRvbWFpbjSCCmxvY2FsaG9z +dDaCF2xvY2FsaG9zdDYubG9jYWxkb21haW42hwR/AAABhxAAAAAAAAAAAAAAAAAA +AAABMA0GCSqGSIb3DQEBCwUAA4IBAQBObbHtMsaa0XTJAlJk4DE9kHgZoxF8ImFI +c1huhnCr2X+XkKxYDF/QUA1XRDWI9S4/6xBDKZdD+RhZ6ds3CbG4JVtoJa1Vvjla +dk11uirkKCqbYrdyc/+KeLS4ruYhG/JoqycTp/G5aCrThZgIgf0jm4peJwd9nqaz ++yjP4L4sDR4rfdLIsk96hPKDImD+5uuJ9KqMj8DO589uqJwhTehfPcNfL4hVdQ66 +IEKK6HM5DMXYzRFr7yAseKZbXngn5QJ+ZBldikP0hgGFYbT1kbNtFOqwpYNvgGvr +ptei46poM3WCB04puszm62E4Jora6rxaLwWGp+6TWELLwUUs9so7 -----END CERTIFICATE----- diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.crt b/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.crt index 337d24e2493ac..9111fb215a448 100644 --- a/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.crt +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.crt @@ -1,19 +1,20 @@ -----BEGIN CERTIFICATE----- -MIIDIDCCAgigAwIBAgIUNOREYZadZ2EVkJ1m8Y9jnVmWmtAwDQYJKoZIhvcNAQEL -BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l -cmF0ZWQgQ0EwHhcNMTkwMTEwMDIxMDMyWhcNNDYwNTI3MDIxMDMyWjARMQ8wDQYD -VQQDEwZjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCCP2LE -nws2+ZIwSQ3IvIhVfrueUmNt7Y5TdhhwO32p2wC4ZA62J9L8klAzt7R+izcL/qbF -65inbXM0A7ge/2wZ09kbqBk5uS8jDetJS8lQmWVZDHfVi8g/yDMWklz2mQYleYmU -HPyIplai3P3KBoT8HurzHw2C953EZ2HiANFnGoEPZZ5ytcT2WenxuU5kSXSxuDyn -8/dCVHEQL1Yipr2LQKYQAHotjo56OhyL9KS5YPjzSFREeyRfQinssTmpGFsua/PK -Vqj+hRdkaqRfiqPq3wxn8oOSpZLQe58O1e7OlqgjkPuZdjZ0pQ7KJj7N3fUQNSeg -2VC2tk8zv/C/Qr2bAgMBAAGjTTBLMB0GA1UdDgQWBBQziDNuD83ZLwEt1e1txYJu -oSseEDAfBgNVHSMEGDAWgBSU68vXWuUWW2xC2AmllVRZZUpvAzAJBgNVHRMEAjAA -MA0GCSqGSIb3DQEBCwUAA4IBAQAPpyWyR4w6GvfvPmA1nk1qd7fsQ1AucrYweIJx -dTeXg3Ps1bcgNq9Us9xtsKmsoKD8UhtPN6e8W8MkMmri+MSzlEemE+pJZrjHEudi -Sj0AFVOK6jaE0lerbCnTQZvYH+J9Eb1i9RP7XHRShkR4MWgy2BzlENk9/LRbr84W -Yf5TuM9+ApiiiOoX9UfSGBzNnqwhJNpG9yJ+HnQSqTnJJc/wL0211zLme9I/nhf0 -kQx6mPedJ3gGoJ8gqz38djIrhJDxq+0Bd9SsdlR6yT+1+bY7hinYx2eLV91AybZ4 -x07Kyl174DD41PYaE1AtoLlrMrQ5BG7Md50Am+XXOR1X1dkZ +MIIDUTCCAjmgAwIBAgIURxNp9ImDloxqOPNAP0ySBZN/BDQwDQYJKoZIhvcNAQEL +BQAwHzEdMBsGA1UEAwwUT3BlblNlYXJjaCBUZXN0IE5vZGUwHhcNMjQwODI4MTA0 +MzUwWhcNMzQwODI2MTA0MzUwWjBiMQswCQYDVQQGEwJDQTEQMA4GA1UECAwHT05U +QVJJTzEQMA4GA1UEBwwHVE9ST05UTzEMMAoGA1UECgwDT1JHMQ0wCwYDVQQLDARV +TklUMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCp7qyGufu1cQYWJJGZ04XulVdwsKytMeLNSDHT90ratfsAy5WP3CRy +fug0E6nB7eykSHnE8aYomrghJIL0oP3v7b7vV/iasZ17Q2uiY67fQb4s6Rvrcov5 +R7ak5/B22uslDrDY0BaSWKCxHREb55rMhVWlVTXpm91kdGvo4Q61Gcxe45mweKR8 +UMbUlNuXrW/xwTwYI4pdDxha2ZXgTBrBJXppEh/KQp0rdy4Be3KG5IbqrH/Bh6cG 
+4CZ/di0i6xWxAhQOlOKlcTHpMAtXx0eBjha/Y9+p3/7z9fmE/JsYozw56r75CPDG +VpNiSDoPMPed4uhpbXQVYeCTUe3Hh8WRAgMBAAGjQjBAMB0GA1UdDgQWBBTm5Cel +/aWnBGFDUnZKNYs+BVFHFzAfBgNVHSMEGDAWgBTjV81VkxbQpzmea/neQpMg1lPQ +IzANBgkqhkiG9w0BAQsFAAOCAQEAjaXJN+NyS74cDTAtjVqo4e+h2K/LfYyIpdYp +mTDi+wRBlprJUDl18TK26c0hV6T4MN8QxqoqCXoEVJZWDjBYOUsl3OfSgPpT0aww +3Z/mIPOLb9mR1zOO9tXZhgNdFCLRRepiLyPRsRVQ3K3klle42DHaEIOUlwtqAArF +d9MKg9PShrRjqJwlm8vL3E8KjNeC8gAvebF3e7ADIatXjRK5Rc/LQhgPCaCZKSDF +w36AhGBnXsCgi3IR00E9CWOsC2UVeAhgHHaN1oJjuLfFupG/2Vx6Ii+PAgueE7ec +VWQeasxHihc0VjEYtSiNlYO6A8rcH7lg+0OCzGr97DC+zfFZwQ== -----END CERTIFICATE----- diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.key b/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.key index 95e11f79cea24..ca0c6ba868047 100644 --- a/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.key +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.key @@ -1,30 +1,30 @@ ------BEGIN RSA PRIVATE KEY----- -Proc-Type: 4,ENCRYPTED -DEK-Info: DES-EDE3-CBC,81AB10154C04B38F - -0L6Buvpeg6QHh/mbYp/3bXDCsu0k0j5xPdIGWd6NCOdb24OQFsOjeA2WuPqs0WWF -gzVrjh984biS3IqeglEr6X6PfVJ0QOgBkq0XgSBXhuoRJL/302N9oPGsf8T8oW9t -pqR/JIB2L7lMbJlJYSjMl0YQT3hWpo2BlrtSIc/GWOKfjDNWc9BL+oHvKJwql1lb -n4yMvYFYJDqgzgxa3r4IIQNsCn3SP+gqbTx9vF6StOIroV51BdSL4IGWRvqnMJrh -ybk1EHSLR1oGcONLU4Ksi33UxdImG70SsnoH/NnInDvV2bxmxmgf5SfYKtxFhoxz -0hISKTMTerPGtRQ5p8wtEi/ULKyInK+qF3tLgZa+S5VbByjDnUo2dCcbDDSkH5pO -uczJ2bs1kJegpCrUueJdbi9OX2upmF+tJb9+5hzFTvey8dUWTEpdiN0xbp4BLfNd -Yp4sMHZovsDJKIjDb0NbXRgLeFh1ijlLPhKwIXWTF3BaCKcSw34Qv22YPwn3qNuw -0KuUPAo0B65R/hoJguvtks8QAXe0S1jZS/fAlQCoIB0TIduy1qkyje+AnSW+1RL0 -ysBxLqbvRUqWlgnu7/28V4FD8JNu3O+UGBEelXlfokLgCBZ6lSys2d3Zy/XVBnG0 -cPl59if+fxKaMWlhFvMLFBup1Y4a/1zA7Sx6kkhvawekHr40NcG4kLHJ+O6UoM4d -/ibnbfIksLNkuo/nwoEcKp7W6SxafV0hROdxClkGKild66rnHtk4IGATjaBqt9nr -FuO3vRtLuUMS+/4kpvhMwl0RhX2/i6xgV+klWNYNu1JTGDFvdG3qfiY2w88EIbGe -rn8JEvRtaH/XNeGdhBwbuObvTifiHyYzA1i5Zh8zvE2+Dthlk19jbBoOUx//LOi2 -JrNkAsqQCF4HXh7n9HWA/ZrKTP7Xvkig6Vf7M2Y/tO361LSJfzKcRFLpl0P2ntEv -XwFOqTvOURERTVr4sBLOVPRAhIs3yvkI5xfurXzbRWtSeLgrMoDgJlXIQbuXd8sq -zIBLqvYf2bcroB66XJqX1IFWEstym/NHGcbrwjR5Fn2p3YAtXnIbw8VhHwV+LIOl -ky/wH9vbnML/DE81qFqRe8vNZw2sGn9skOyU/QvKeV1NRHYZSV3hMx82bPnjgFeB -ilzkb8FEPOAOJ0m44Q3C9eUoazJT8aCuRIAgSL43se1E2pFlIXQTfYRARaWEkSf9 -0hXqQJc17b+Hj0ire3PUqbG3+/l1qMhhIHwq7Kuyy2neTuW/DXbXp2AMv/bLcnHH -apVeRZaYXVSnGXJNk2CeRnCs8OGir8g5zkH+fmVb9knt6TL2oFIsQqULyrLolhfe -6Q8mLzq/sd+w+VuN1n/5+RQqOJZWEkLFzQPx8wTqeTB19OE0gjncrqzCHq7INqRe -tGClWOj/yL0Sciu3ctVGz1VAbgeBKnLdKm2TX4oFB4OG4E7GMXIL7hGxjtjLAVMW -XNc3ZYNQra+iPqJtFxnmbrF2Sn0Wr0hcAT1V0A0TRKe/n0lpUrfhTy/q4DUlOVKG -qdCsTGoYXObpUWU5G9GyCVWWRJyrTxJcBZ9KWJu9Y/aMFzoa2n0HQw== ------END RSA PRIVATE KEY----- +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFNTBfBgkqhkiG9w0BBQ0wUjAxBgkqhkiG9w0BBQwwJAQQO04hOVF1REJsgAkP +xkFZ/gICCAAwDAYIKoZIhvcNAgkFADAdBglghkgBZQMEASoEENoXPnjByIDKjwqz +3+WRgNsEggTQuv3EOfjFwF8f0fac2GjJJxN3L2b88CeKxbjTL/6kQ1bvWSI1+L45 +0zP6CQ+5lI3N9/0YFoCWX5y57e+OXafAWivkUp/LiGkYWcRnqGVhZgSQTFQP9rly ++3PUDLlM5FuGylKvoqYmTIBud1puBiChYj0FKImOyHgPH3/GEGbTSrtvCSZkCw72 +XkkF32/OtSbqTuGlGgl+pGLTtnS2+RhgiCzXMCtvHJqjhAh22J7uoYYqk02QKEme +GMWM4anxmLPBr/Rw04NrlEfgRl8mTIhgrgwKV/mwfK++kqboWpzfXPs/S4KHJxmv +WvVcxHovoyovBA87C8cY4Qz/PZzm9vZr/+hQCF0OJgvZejWiUiuRJ9HgeteKTEMo +CrOlyZXcaMHPCa8CK6U+lUBwTZbAAzMYSazfaf8524yDGksOA4J/KGC3uvviYW09 +hTaqhq0yGqBUe5mrgEEhSV2vIpjK6MKxMtvjKvc1fjfrYIL9BGiiHOCGaljQTQAA 
+yLZqQwlj//v4om3onR6HOfZeYsQxzH5zNFSIJa96/kBBWG9Q0ZMmqEqB52rNUT28 +ZapjaqqRkos/rBdvzDQzlyx+NjZnOsueEkC+cX/1psIoE+6vLbonMrlzl+SSqtxB +EuSD7dekZ7o3eQLzRI13ohRtzMv4ojWMpr769WsQ4KKflK7pLVdIYFZbL0Q44s/w +Bc9ByiwSGymhEO6uqqfBT1baj19yTrc3FU/jaJyIsRNs/EAc7c6nPejiiwxtE7Ex +oVSwbKoD2CXB/DYlenenBGvuP1jyHSkQqv2YWdL1bm9Rp8DNJ+HG0OP913fTuE3V +7ScOt2ZnR2B+VWN3Eu8MdiX16vi/ub/4H1HihANw/W5HSwuW88V7fGcbSzRWxyCN +5Od7b5y2zAD/tl+x4GXFZ9k+di2sZc7W6zzVqHr55nfxvsFvHt5dWipTxZFdVhRh +tXhGnYCfr1gKN4FdTW/MuYa3otHL4gVpnVdQ10C48bCljCaVdep/AhC5dj0GaTyx +VJBzzD5vp6zt6jsfjI059+zVyR5zxhEKeotURVTqzhz08TOHCkyQP0KRQ+U5ve80 +9cj1odt43JBXFq5w9/aUQWG6ZnBJQup/zlDdGncPd0+3Eh0WoQyDh/XlFosrxt7L +QF9SqN9oTIp9Fgr6yOFrDOamQAb6f+5Ms5XNegHmlqSkGcpJxf2JBNinrY4drrQ8 +GuVCQ94GhjdGMdSM8Vv8Yi+8RHyqn6R2hjiY4PX+86J+xFNOGr5RiXk8NUp5kM5s +ZfffpB0ELlgBQzEv2PV9hdh66M8EGjyQl4ItzXg3JhbiXOKAQLbpPOD22zcZsmm2 +r5E4vgRwYfHnmwqJsrIcvMK1m4USlGuwJYP5ExuwE4xdsaUNwKEd3gZAXzhV1YKn +HyBfJFwYJsBR+l9G9kt/ZWpEd2DNnfss7ujQYTHGQ6WT1zbKbCsb8aE1CNXXs93C +DtuMUvG+BRTwuSAtvWTf+XPcTjgTrrAKQq2tmsbDe3CEgW5r/4+OL6s3nxI/mVVg +4jOcUZ0bePBvu+4/jIRqlx2MZIFRp+vvR4RiQ0wYBcihW7Wed8y+ZWdHxg6eUlJP +WXwdmXsz+NFMXpJvBX0OgntVzxEdJAyGEeBArBJPAKmcbR3JfDWMQ8M= +-----END ENCRYPTED PRIVATE KEY----- diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.crt b/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.crt index 309ade87fbd78..317991a707a16 100644 --- a/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.crt +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.crt @@ -1,22 +1,20 @@ -----BEGIN CERTIFICATE----- -MIIDsjCCApqgAwIBAgIUXxlg/0/g3UYekXWBRpkHM84EYfIwDQYJKoZIhvcNAQEL -BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l -cmF0ZWQgQ0EwHhcNMTkwMTEwMDIxMDMwWhcNNDYwNTI3MDIxMDMwWjAPMQ0wCwYD -VQQDEwRodHRwMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAi8VQaSR6 -uqgT1Rkw+a39OSXcXuhJBVdoO+AyYPK7hdUTxj1aqnXkKeAiNGpe/J+uXZ837Spy -rmBZS3k6S5hLEceF2xug8yrR7RYEZ+JvGlRgg/jj+61gGbHAD314+vvu0YUo06YG -wbz9AnjJA/sMbsCp3iSzWIkwZBZcCoZ/YsG4I89LSjYL3YmRi2193WMX6/OfQYMN -Fkv61r/iwBEkgJ14cUSYe3norGuQfZuXSh5kI5D5R7q7Bmb0um+jzY/l62kj3oR1 -YWo3g6DdU/Bc/3/KmEEVXIfdTonMBMyL8PvYORoMKrYdph3E8e39ZQhPeBJNJKw0 -XzsZFzIUlTw0kQIDAQABo4HgMIHdMB0GA1UdDgQWBBTiqknjZLa5E1BneHRvTkNa -Bm4nNTAfBgNVHSMEGDAWgBSU68vXWuUWW2xC2AmllVRZZUpvAzCBjwYDVR0RBIGH -MIGEgglsb2NhbGhvc3SCF2xvY2FsaG9zdDYubG9jYWxkb21haW42hwR/AAABhxAA -AAAAAAAAAAAAAAAAAAABggpsb2NhbGhvc3Q0ggpsb2NhbGhvc3Q2ghVsb2NhbGhv -c3QubG9jYWxkb21haW6CF2xvY2FsaG9zdDQubG9jYWxkb21haW40MAkGA1UdEwQC -MAAwDQYJKoZIhvcNAQELBQADggEBAIZr8EhhCbNyc6iHzUJ/NrUGht5RDHUKN9WU -2fd+SJlWijQYGoFW6LfabmYxIVPAFtYzUiA378NFoOZZ4kdC3gQng8izvS2UDcO6 -cAG5q/dxop3VXqcLeK3NpH2jd83M8VZaOThPj/F07eTkVX+sGu+7VL5Lc/XPe8JS -HhH2QtcTPGPpzPnWOUMLpRy4mh5sDyeftWr2PTFgMXFD6dtzDvaklGJvr1TmcOVb -BFYyVyXRq6v8YsrRPp0GIl+X3zd3KgwUMuEzRKkJgeI1lZRjmHMIyFcqxlwMaHpv -r1XUmz02ycy6t3n+2kCgfU6HnjbeFh55KzNCEv8TXQFg8Z8YpDA= +MIIDUTCCAjmgAwIBAgIURxNp9ImDloxqOPNAP0ySBZN/BDUwDQYJKoZIhvcNAQEL +BQAwHzEdMBsGA1UEAwwUT3BlblNlYXJjaCBUZXN0IE5vZGUwHhcNMjQwODI4MTA0 +NDE1WhcNMzQwODI2MTA0NDE1WjBiMQswCQYDVQQGEwJDQTEQMA4GA1UECAwHT05U +QVJJTzEQMA4GA1UEBwwHVE9ST05UTzEMMAoGA1UECgwDT1JHMQ0wCwYDVQQLDARV +TklUMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCk1Ot2RGbUS3yJchvdtrcGJPoR8cTTUfVVMMRT+btXayllbLQd/cHV +jP1DxauXiLQs77R3NGfPs/Sk7fGQh6p/4F52F5wlNqG/Hq0MquqjXEo/ey8i+p5Y +zTB8v2Hv6RwN0HLB2uiAUOWjHvddiz36nfPmQ5jlF+IsR36KMb6AWHaB60kUabZL +vPOrtw7KZMkHRC+3tXvvepNe3uAKTIOEeHJneNNc76ShPnjANev7ONpNHgvMTJDY 
+nbNtDL2WnHvnyEwIgWLOnJ1WgOAsiSpebPqibi+25FirFKGTB2qp2NfU+tCoK7hG +1nPfPSCxBEqhwoJOywft2AxhDoicvo+HAgMBAAGjQjBAMB0GA1UdDgQWBBQ2Dr4v +2/aWi1JSmXfRITKOTlwa+DAfBgNVHSMEGDAWgBTjV81VkxbQpzmea/neQpMg1lPQ +IzANBgkqhkiG9w0BAQsFAAOCAQEAXEmxgNViixLWVQx9EgWscxaiI4d4OFd7Dfb/ +11qRtKoobEuSK5lOhDim8hZfs+iueKHuT/bRJ59Yu/p4GS+ZeJRgEXfCdY9S3Zeb +qGCi/IBRT1oq4vD3OSWA88C3I+pGXRb7R3fvtIcfy42o1FdHAg3MOlRx7fZHtAdE +GJ4SRsKTex7phWvKZ14R+wj45B8dA8Ty6/6nzPqb5+SLa5w37jU/gdew2cW2lEaN +tZb/aj1l5LmxXje3mvVag5SR2ussDrARcRu+uW7qYq0IzzQDxyzwpEWPC/QsgEme +9GFPd3xNu4tSoM0arrK8xjNtEh4P2gokhNJwy+vDGvKMrrWjVg== -----END CERTIFICATE----- diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.key b/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.key index 8b8d3b4083c67..68b61c6d6e03e 100644 --- a/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.key +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.key @@ -1,30 +1,30 @@ ------BEGIN RSA PRIVATE KEY----- -Proc-Type: 4,ENCRYPTED -DEK-Info: DES-EDE3-CBC,127A4142FA81C5A1 - -dP6oSAUl47KCnP0YZSX108qcX5s2nVGpD0qtnVQg89mLVFd7IxpKQaIuODSadRTo -AD0KINITy3ZwUr/TTJgERu88baBsTHv3PLEe7TpQI2DGGDz3aZfO9e6Jvglbdi5b -CBLaxRXGGhhH9YH0E87Lp3JEwg4udWmlNahGIhbqNheZNTtDKt+Lx80TyyIml2r/ -GAhjT4UPvIRrATFAcL/3EKOjRqvb6SeGnZu21n2TSmsBEr02gC0Ox3qmsnRM3kvU -jCuUzWTzJSQLXZwZuMtv5srOSFAbU8EklFXNhWJU/7GBy215aAAW48hCzkPMVEbg -oeH4nuze/Uulih9UxJGCBIpvfTnksyMRGP/zdy1mnKuqQk+yI0n7JWMJL8QoDQc8 -XvzqOmKLdBVezmzOVP/PyMAhYWetILh/1UesjyJot2hwSXPAxqBHPVA9bnmel6CQ -VccNSwaK120yT5YhkUMFc0AmUpztzNMQzJ10g1dW+Qsr+n4vtFmAuTvBgogNNVXn -eX1hbbiXGO1Fw4OMu6qTJ4T/P+VFb0CxoxETWeqdjcs4LGbeqF68nayEsW0ZzhbI -W5c+JAbW18Kb+k/KzKZTtJEXBw6B/2FMe9x9z3BIpVhplM2KsNk7joWnumD8LfUT -ORRHUPV7bkdiDsn2CRaevubDQiChcjsdLWhG7JLm54ttyif7/X7htGOXPZLDLK8B -Vxe09B006f7lM0tXEx8BLFDNroMLlrxB4K5MlwWpS3LLqy4zDbHka2I3s/ST/BD4 -0EURHefiXJkR6bRsfGCl3JDk0EakcUXM+Ob5/2rC/rPXO2pC0ksiQ2DSBm7ak9om -vlC7dIzVipL0LZTd4SUDJyvmK4Ws6V98O5b+79To6oZnVs5CjvcmpSFVePZa5gm/ -DB8LOpW4jklz+ybJtHJRbEIzmpfwpizThto/zLbhPRyvJkagJfWgXI0j+jjKZj+w -sy1V8S44aXJ3GX9p4d/Grnx6WGvEJSV0na7m3YQCPEi5sUgr+EMizGUYstSSUPtU -XhxQRZ95K2cKORul9vzG3zZqqvi73Ju5vu9DLmmlI00sLzyVGFtvkuhrF2p7XclM -GU/rMOeMClMb6qyCzldSs84Anhlh/6mYri6uYPhIGvxqtH44FTbu1APvZp0s2rVm -ueClHG78lat+oqWFpbA8+peT0dMPdSKDAFDiHsGoeWCIoCF44a84bJX35OZk+Y4a -+fDFuSiKYBMfAgqf/ZNzV4+ySka7dWdRQ2TDgIuxnvFV1NgC/ir3/mPgkf0xZU5d -w8T+TW6T8PmJfHnW4nxgHaqgxMoEoPm8zn0HNpRFKwsDYRFfobpCXnoyx50JXxa4 -jg095zlp8X0JwconlGJB1gfeqvS2I50WEDR+2ZtDf7fUEnQ3LYJzP4lSwiSKiQsQ -MPjy0SMQnqmWijylLYKunTl3Uh2DdYg4MOON662H3TxQW8TCYwK2maKujwS9VFLN -GtRGlLrOtrOfHBSwDCujFjqEmQBsF/y2C6XfMoNq6xi5NzREGmNXYrHbLvl2Njwm -WB1ouB4JzmEmb1QNwxkllBAaUp1SJGhW2+fYOe0zjWOP9R4sUq4rRw== ------END RSA PRIVATE KEY----- +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFNTBfBgkqhkiG9w0BBQ0wUjAxBgkqhkiG9w0BBQwwJAQQprhRDFFTnmWmHgAB +ULpI4wICCAAwDAYIKoZIhvcNAgkFADAdBglghkgBZQMEASoEEEuzT8itQgHZfKb/ +ReywEdIEggTQD117YFYRhSSivErIhTKQSuofhH/ZgW6nYnKlcDT08bgNQjbEg94a +QZqsPl9D6tfcmg7XlNTEiQpnSnsh6LrrhQbNkt3PvJxfUUy0ATVXXdH538RcPLAC +K2NHi1iwSbnqdcBU+/Be8M1F9e9P5hx6HbJGEF/JIkpWDDmOoCGvlwfH0PSiliY4 +uqxsmekvNgz2GBhELZj4sEJ7C7/I26vOuzS6suDn6xGF8JZIg8i7upamUgLoBtG/ +waxlmfTx+hkYFDQGcy9jvkV043sK/hLTOycUGhmS1ybQSf9ANbsM8RjOIq6QxpIZ +wtV/7EzqDWYradQBRrhAP24yzEj6H1cTr8yMmD6JuxvGZ7uQpTCRiFopB6TgK+x+ +2HqEgeRyBz4hU0i22kyGHC9sSG9WwKhmXhfcBtzJi3JABbkeg9LarwOzbh51DaxN +/gTop4UYRTYbJB9bhcIU0Y5xPSSphphCWmGuBU6CinsBj1w+UBP137GzgnXvV6PL +S8tai963P38Oafw/T2IyFTyAkuHJJ5MjVc71Q+vYLzfu4SfBdSIb1oFPT4otNwHP 
+NbPvTYq0DWnHFNeIc5vmLJJTWVemBTkxvHr+WfU8meFsjxZT05gzgOk+5BZFya5h
+oV53mYQYPSyJiBUz0icHyyzUWaEHQLXHrmE6i+kW7+b4lrhi7KV1AMGRSJXUS9/Q
+I7NuCQG3+iCyMd+CupvsiK7xjOytgCstwWIGeHlSmYwS+txi1wpbBJ4X6NQLlHyy
+KZoFxyWTKtEdX1QKioBxeoKVy5G5LOh7S/jd9jEsZ2C8snFnDbNHALBmXIH3fshA
+bo4keel427V6W3f9/u0nT1RWrYiBK12XJiS3/kXg8krln1Xb/MkgTKmLEZF+VDXO
+Y3QwAICNM6/235siHuQG+uJ/WoL9xd1R22/+2mxNy1Rdhd49n8GFg0Kjsbmd+hL9
+aMwRU09SNNPCwdAIHmoMCIYS6uTX1bcGSzMir16JepmIYQllwdOoLk2nxtBCaHwj
+ZLYO21W+iFgo4TwXzkuaI2q3Ll0n79BJUVdOnz8hBCq0Ox7sTEY7g1vQGHIsBx98
+PYZmaaXVh+u2chHKrwp6L9mRikXQiNWwtqTH/kp7BydRnYIcaP27SCM8HbaYfV/x
+02FjBbpZ7u1PwS3jlGmcxE/qTd+cLkk3pm7WPPMlOnMh/X5N3/OpznUgJnVRtGqk
+uDy4HSE5vEhHDp0F67R0ph8/HfIBamvJIoonYzoC2iEMgL4yqL0x44SOCioXScgz
+hluYX1kQRfyXWjoP+vBBOUapwYDwk1gGXap5iQjtiVq6FN8DspckHRVI5B1voVIC
+37Mn2OXH9JloObouLYMRa1dDm7h+/3Cb9UAhKpOjpLc1apA49+Rjtq1gBExhac74
+9SwrcQJdRx0NDJjoMHKrGUFkg/W+R7OTad7+l98M473nWuV3mzJDXcuxmam9llRI
+2O+1QsV5hjd4/zCtIka+pOALp+cVSmktTjKNh105asX7d4XIxtg3M+FJWTEODZfy
+VulvKri/rkrbCBwMQyj3TpF4AkVjhSM2P5j7LRsivfGc8VL00OqYJp9pYfav38gs
+EpYOmaDEV/Ls744WSJJo5Qq0EpDclBTFjky6kZx7RDfySUzfN/Nhv6A=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java
index f80ad901ce765..563f89b70545e 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java
@@ -127,7 +127,7 @@ public Optional buildSecureHttpServerEngine(Settings settings, HttpSe
             "password".toCharArray()
         );
 
-        final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("SunX509");
+        final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
         keyManagerFactory.init(keyStore, "password".toCharArray());
 
         SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory)
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java
index e0600aebd90e5..e573a9d018862 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java
@@ -83,7 +83,7 @@ public Optional buildSecureServerTransportEngine(Settings settings, T
             "password".toCharArray()
         );
 
-        final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("SunX509");
+        final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
         keyManagerFactory.init(keyStore, "password".toCharArray());
 
         SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory)
diff --git a/modules/transport-netty4/src/test/resources/README.md b/modules/transport-netty4/src/test/resources/README.md
new file mode 100644
index 0000000000000..50cbd432d32c6
--- /dev/null
+++ b/modules/transport-netty4/src/test/resources/README.md
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+#
+# This README describes how the certificates in this directory were created.
+# This file can also be executed as a script
+#
+
+# 1.
Create certificate key + +`openssl req -x509 -sha256 -newkey rsa:2048 -keyout certificate.key -out certificate.crt -days 1024 -nodes` + +# 2. Export the certificate in pkcs12 format + +`openssl pkcs12 -export -in certificate.crt -inkey certificate.key -out netty4-secure.p12 -name netty4-secure -password pass:password` + +# 3. Migrate from P12 to JKS keystore + +``` +keytool -importkeystore -noprompt \ + -srckeystore netty4-secure.p12 \ + -srcstoretype PKCS12 \ + -srcstorepass password \ + -alias netty4-secure \ + -destkeystore netty4-secure.jks \ + -deststoretype JKS \ + -deststorepass password +``` diff --git a/modules/transport-netty4/src/test/resources/README.txt b/modules/transport-netty4/src/test/resources/README.txt deleted file mode 100644 index c8cec5d3803a4..0000000000000 --- a/modules/transport-netty4/src/test/resources/README.txt +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# -# This is README describes how the certificates in this directory were created. -# This file can also be executed as a script -# - -# 1. Create certificate key - -openssl req -x509 -sha256 -newkey rsa:2048 -keyout certificate.key -out certificate.crt -days 1024 -nodes - -# 2. Export the certificate in pkcs12 format - -openssl pkcs12 -export -in certificate.crt -inkey certificate.key -out server.p12 -name netty4-secure -password pass:password - -# 3. Import the certificate into JDK keystore (PKCS12 type) - -keytool -importkeystore -srcstorepass password -destkeystore netty4-secure.jks -srckeystore server.p12 -srcstoretype PKCS12 -alias netty4-secure -deststorepass password \ No newline at end of file diff --git a/modules/transport-netty4/src/test/resources/netty4-secure.jks b/modules/transport-netty4/src/test/resources/netty4-secure.jks index 59dfd31c2a1567c6fbae386aa8f15c563bc66ae0..d158f1fe60ef73fdaa610ce8e92a398e4212f20a 100644 GIT binary patch delta 2521 zcmV;~2`2XD73LL?b`-2bsR&Z_>1Z`R1*Lm6UWDR37kraH1|)yv&4@(E?*;s*kwb$P zLCov~f&|b3yv&SmU#EP@XkC5rYDlv`1$+I0m+fF03Zn)vEv;8qtK>O%+1GjAQbr<- zKa;e@BY36x>$Uov1e4am_lJ zx32X@sL$FlM+QpRy}#{%I;Tryx>wiV2Li?ITv`l&SPa0tlW%ng_f3>FzRQD4RQ^$} zg&L4Ao7oz4?vR_Bf<{c^)#;o$c==|EhQ$4=34~p*!1bLOQpHwdgZ{d)g z3{+5L&aiY;c*ftSi)0`;5g~9h`2S`*YsrLzT{eG1G^-q41vwByj7{|qoQa>Al0|U& zJqg6^^*~Q2YBiEvZ=RVpPad!xc3UgY4lQM?t!K)6^X3H9XJbr(Fa2PLUSkQC-81Z@(6*s zI)i`mb*Y+e$WWPp^mqVX6*trZlsMUK1~sVGyk%^Cha+O}q%>1OCd8&ujh@rz(evH1 zla=ptb!KV&X$l|9tENlY6+GdExCt_xFB9$y@RQF=D#7 z`P8yqpWP3Wpu;Z$BY;!H;J3^xWN=5c6M8$d zRo$152a?5dd)u&5X8P(U!;4=n$ipmisagF_hnEty;=5d|Z$yGT^r+~O#TmSWL5~*X zjdc%fCK8#;@e*KpzMy zF-|Zk1_>&LNQURbYfgE zF?W^E_Q+wAKL#X!23am>aNyhp*U{tS*0M6SVGTt# z2yLOt$_*l;Jt_wQQf^w;c=fdx(wD=eK2(q36JgXVBy}Blu_<_tNh|kU**D&8j4sCj zAf$+2i3C9L-e+Js_Vyi}m0=6*F&s;ALbaNYsLn@!Dtgo#(#e#$(m=Xb)xB%*kvqKD z8EOI$AD;ky-jkx+4)CB}3~vi-_4#VlG8xD^@I!#YKRy}zE|HksPPp!#e0V_j3je_W z_x3ht^}~r=4SwZ456eU_u8QWMXyaR+QM^*?rb60% zY(KGzel-wY414%K}=jM>hV>7 zu$*ir*FtxosGimZlyjoWW|(f9ie{~jX0kR$j_tWuOpDLiJvdq^2(3_*5=UNS681VK z92S^Tb3ugm;AfTr2dowG6~d3vAqu;`I{hVq@gnXP9KW#yNQyoPvP~eaFjJ z|CVUhl(X7S_2{T{sHWkcGnQ!Dq&_yJY|63t-p*x*qV@YL80MQQg+P-@e;~Si3_&lu zaKqbG6*rL{mi~?KUW-gzi+fSHAu0Fv(#{hY@+M>qTY}IYBr%VGJWdmrL=ne-c;5C$ zlYTyTOeyRWQW8(X#l+u98+b(L(#8{Z2`iqp2flD^fO6^-4f3OwLD|11u zn)4LMua3hUK(p!-gEWQ`S3)srU zpJ<2Y5)&>6_fp^xp+6jUFRE*OWAud-HTe+#>>!(~RT_JdXL>TE?pR*zWMG#4CHB!N z_C6Sq!*VUcfFXEat=gG_QQ#p>uar$B4k)Lgi*U$KdYXShC-=o+?!hBRL%6Q0J}H{S zJ=>!2x||9Ui?_^!GrrSx33MB`1}R}fR{R2ceq6x{Ya`59VyHsy^c z84YMGyJmkjr_xFMiEq4foE0iFGe{D4?)*e^D~BO`MX2t~r2x?Sr@Gz`Dfgydfx*$ZR3p?!g!`7o`PwnLW-ueZO{X=H*>_WgbV9@VCry 
z261uU34P$TtpEzk5vIgS#l6RHJF_ACeaB(i+!7lnm*(*K`V-LRUrr+v3d2Zmcfs4; zy5h6yjfx#?c2IwH(lvj8I^AGC7IA}k-9dkEx)70|w)}T(UC+Irr+zvt;)*RMNHI%S zwOwc8)?mW?+m{9v?JIT;y#Mate6-RLz%8}Yc%Lp_F>RlhJ`-nBI{@k+rJ*)gR5xRC z?RH6(Un~F`1xIM=P?n(B*ostw4!B+_Rb1r z<#000*@bbuo#~&IisrJ^5k$(@a0Qw$TO_f!f%UsYS?D`E`?zeltcQpXdEkBP-mcxV zSJQ zWHSyZIFuc(NTk!?c(HOUMQYLEPToAhVi_K0jCAtV;B;^HZ59w`V&tz}4o1<0eFrLu ziH;A)*G$AZzZ-?t-AAk&L<$OTiNUbsmM75uMNu9|YL0i#PB!`NR(d0o)O3FoUr)K- zn?SBH8F9kOeY)XDUNv8C(A}IC9ikxn9u{V$Iv$bngmT%atO(d_n<)*e0_b7;tT2Xz z8E>c~ivpJjQ(zT;hNw~a!vq(-%!6^CiiV@Hh-m#nd!)&V4G-SU04BEfwqzYM0@HDR zF-|Zk1_>&LNQU1CwTI9!q)1Fq zBNfM9&FY+!KL#X!pYb3Oo6gAe1~KMGts78(fPw@d%hCVdm{lW5Q#HBwt<{?EMe>GK zN==RgaV*y)X8c~I+^8n)ZZKjg*;2_u-lR$|0%LD>Vs6{J^;_&*VIhHz>aV{U@idsC zs})6%^WpoI9Nmh+hd?a3hsvP3Vl0kLv24N7=SH>aCGQ-6+`#ra4|~=FD{tC`^g(md zJPmaFJhwl1#jKebxbPNWHJB~u<%#_;kglUlAWaz+@{i?6<(WvgW@4(fSwJn)`VOHZ zNPm3=2WO?qt6E@g*@bDP)$x`ncey3@YRuU`;VS;_rDFMPD_~z$z%l*QaAm%Oc@|ZHx_=pFs zYs1Cbk;vhc-?P3uO;Sgvnp2z88$frosus3XY$c5>l>}^9AqsPuSvO}J6!)&h9`DD! zD!gugZ^jz0{7a(YC`vsRO247p8w^&`_2bvY|OIpB%5q_gInzhH~;TOQsU zouVqm>V%P(;P0|M@bb<<&sLsL;%`O_~cC}Z3 zRy+bT+k&fIvDEN)1bK9AznNEVfQ&WuIPbvu5`_L68*hY_=-2kzpu=ULO716y!^s;t zR#n?)uiIsZ@a^+XW;O0O?PWwLmU@}QSE>E9{jEaI>5xccHKzK%?T!s0xzWe&PJAvY zk6-#6;q^s*W1%x_j6GD9DL$DKt3xDz(gQ9SL_s3yC_wOOO7u~Q^pY1DK-v3|Ge z3F0O9jB=s45do%OiY3bz9S4XhJsO6FY{f}WX)+=*T{8e@!_K@Onj;O*c0Z$=e0$P= zrSHLXFikKqFbxI?V1`HmWdj5P0R;dAAe8@J4!(Yu^PKN2Bm$lCa`TKfz=b#7jxnyU jYy*8_4u0sHGtEP%2W1{=gGb>DV(5!@AmxDgO;&T*6xdkhK>_?vBRbRC@E)X6$G{!s6CPLxV{$o`sV=~!EoWGu=nTC?Rh#N`U-puvz zLSvd`pYX?tERoss3zIabhY^YvBkx^^ZO>(ma+s;zrT$}vp)q7~x^TtW%wO0*)SLbuhY&`-Xq?-|G;P;*Tot}vK5czFRS`!DCzk;6S>*tB;V&2Td5jKRYdx8wc zM%$X~bAtI?rT5US-nl-V{qs6!+H4En2c7|#644UYFe-RZT7ln zQJ0xXrmIAWgJO-)7~xh7_%gq5MN)$2G!{}gW&P!n@Q*wC5fhj`#PwT8bbekNS-@slxl* z-qUT2H+8Q$X5^_((SJG3O0&>>GjS68uREx9g;mP7t)w>YC|2)4`wPO_py!W8WR!6| z(Z!55XY@D|;|ptak9Ey{YiU}kb6@FRK}S+%a-|1p=Hj7PKR(f=S6fak48jm)K*;R= zqrq!!u!cQO*~Yfs`tZ7%s7SKoE%UBV5T$|F;c!Xbh=D=fN2YZRSw7FL>HAV>dUg1I z)4LzPgCnuNuc)hUMZx3ej0Z|@l{BQBT*9d|sKl+sMrm8Dx=j1PAH$Q+cL%3sL`>2R z^b2xdn$JECm}3ebIj^G>e*rn-TqIk;h zm}v^s8LVfP*3XTZJoz3UlHXM&Bu?$HM%4F-sT<-*{6iwhr3s=lw{M;;{L}qGDLp;4 zi@@TG^9qdM*cMX1mAqNYb$lNd)z-a`AQlioyAzX`1Zu?G9G^GKHTk`UFTU%$-1D~t z9a;o~+`Q zs#A=)KtUkc3+1X9JGivQk0T|aUtf7^nZPq&4nmvj?~J5-DTf9=p?U)XI9`T5G?39k z~7JSWAa$s}dWJoO(qZhy2 zW|v+gOX$1VPd_8?aaOntJzad`Ws$%;a?X4wRIzhm!cQO!5UcwPVfWN^aCd?u_6meX za8O%;mYdb!ac`LcEIbi*Nn z$Z9S@e`hp5aKb*e)E|SOpB4JnsxTtlsA~4Nl3ZV%DL^x`{6rvYg}FR*k<-Y;0DE@} z#3(t98(=v;4g`mMti-iS1>F^h=!6TJUSPpDd^; z4=>FyAL;qIo_0>I%9BBBJX$ZcS+wr3LINU|0b((Efpt^9r|M05Ud^fLjrwGF(;=AA zQu$*$u1hX#^D)w6x?7?+P7$fRh2SI7SO>aNzQW zoBcYVH_KSps%LlKsd7?(3I^b5Ud>|DA){NrXnzHN<6C$4sOsn8ou03Sy(8u((5^J5 ztUI3UN(%@(8Bu|~ozmjpa@Jr|kKDL2cN<675UQRI%{w-|#-erxx*ulSFror-Br#eS z$#?wBEGjyzV%^2aYm4C-ai-ozTIZhtI$Ln0&e~HJN5j=3A@J%M!^GnIS zj#kIri|f+gbE?~c>t%Fen^`BDS1+B=o<<3iUwR#;A7hxkx-xWQ_^@Vp3BIDvCMs9I z#lToMM|B)F5MoE#Jdwq4;I5%)UOju|T=d$Ry6d9#>3nIU@1xWSL8PW4CU5%$*@nNrc!H7xDJPa zZ=&*(?WSq-Z4bpqs)909IIW&F< z5$VTtvMw)Qe(knHJUlFdgZTQ?{Owptksuzym^&glW%4O5`yta(Q#;q~bM;3C-0WID zWscpTVs08zdo~jC$1f>l9j^%NDz+Bb|ET^>EEvyDd!*Jgk%|P%W2>EMV&*T zQM`YCKM)WC03&3!eU%ip;M|@a7S@R&`86xqY05<34@ buildHttpServerExceptionHandler(Setti @Override public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { try { - SSLEngine engine = SslContextBuilder.forServer( - SecureReactorNetty4HttpServerTransportTests.class.getResourceAsStream("/certificate.crt"), - 
SecureReactorNetty4HttpServerTransportTests.class.getResourceAsStream("/certificate.key") - ).trustManager(InsecureTrustManagerFactory.INSTANCE).build().newEngine(NettyAllocator.getAllocator()); + var keyManagerFactory = KeyManagerFactory.getInstance("PKIX"); + keyManagerFactory.init(KeyStoreUtils.createServerKeyStore(), KEYSTORE_PASSWORD); + SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory) + .trustManager(InsecureTrustManagerFactory.INSTANCE) + .build() + .newEngine(NettyAllocator.getAllocator()); return Optional.of(engine); - } catch (final IOException ex) { + } catch (final Exception ex) { throw new SSLException(ex); } } diff --git a/plugins/transport-reactor-netty4/src/test/resources/README.txt b/plugins/transport-reactor-netty4/src/test/resources/README.txt deleted file mode 100644 index a4353cee45a97..0000000000000 --- a/plugins/transport-reactor-netty4/src/test/resources/README.txt +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash -# -# This is README describes how the certificates in this directory were created. -# This file can also be executed as a script -# - -# 1. Create certificate key - -openssl req -x509 -sha256 -newkey rsa:2048 -keyout certificate.key -out certificate.crt -days 1024 -nodes - -# 2. Export the certificate in pkcs12 format - -openssl pkcs12 -export -in certificate.crt -inkey certificate.key -out server.p12 -name netty4-secure -password pass:password - diff --git a/plugins/transport-reactor-netty4/src/test/resources/certificate.crt b/plugins/transport-reactor-netty4/src/test/resources/certificate.crt deleted file mode 100644 index 54c78fdbcf6de..0000000000000 --- a/plugins/transport-reactor-netty4/src/test/resources/certificate.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDkzCCAnugAwIBAgIUddAawr5zygcd+Dcn9WVDpO4BJ7YwDQYJKoZIhvcNAQEL -BQAwWTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM -GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MB4X -DTI0MDMxNDE5NDQzOVoXDTI3MDEwMjE5NDQzOVowWTELMAkGA1UEBhMCQVUxEzAR -BgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5 -IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAzjOKkg6Iba5zfZ8b/RYw+PGmGEfbdGuuF10Wz4Jmx/Nk4VfDLxdh -TW8VllUL2JD7uPkjABj7pW3awAbvIJ+VGbKqfBr1Nsz0mPPzhT8cfuMH/FDZgQs3 -4HuqDKr0LfC1Kw5E3WF0GVMBDNu0U+nKoeqySeYjGdxDnd3W4cqK5AnUxL0RnIny -Bw7ZuhcU55XndH/Xauro/2EpvJduDsWMdqt7ZfIf1TOmaiQHK+82yb/drVaJbczK -uTpn1Kv2bnzkQEckgq+z1dLNOOyvP2xf+nsziw5ilJe92e5GJOUJYFAlEgUAGpfD -dv6j/gTRYvdJCJItOQEQtektNCAZsoc0wwIDAQABo1MwUTAdBgNVHQ4EFgQUzHts -wIt+zhB/R4U4Do2P6rr0YhkwHwYDVR0jBBgwFoAUzHtswIt+zhB/R4U4Do2P6rr0 -YhkwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAveh870jJX7vt -oLCrdugsyo79pR4f7Nr1kUy3jJrfoaoUmrjiiiHWgT22fGwp7j1GZF2mVfo8YVaK -63YNn5gB2NNZhguPOFC4AdvHRYOKRBOaOvWK8oq7BcJ//18JYI/pPnpgkYvJjqv4 -gFKaZX9qWtujHpAmKiVGs7pwYGNXfixPHRNV4owcfHMIH5dhbbqT49j94xVpjbXs -OymKtFl4kpCE/0LzKFrFcuu55Am1VLBHx2cPpHLOipgUcF5BHFlQ8AXiCMOwfPAw -d22mLB6Gt1oVEpyvQHYd3e04FetEXQ9E8T+NKWZx/8Ucf+IWBYmZBRxch6O83xgk -bAbGzqkbzQ== ------END CERTIFICATE----- diff --git a/plugins/transport-reactor-netty4/src/test/resources/certificate.key b/plugins/transport-reactor-netty4/src/test/resources/certificate.key deleted file mode 100644 index 228350180935d..0000000000000 --- a/plugins/transport-reactor-netty4/src/test/resources/certificate.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDOM4qSDohtrnN9 -nxv9FjD48aYYR9t0a64XXRbPgmbH82ThV8MvF2FNbxWWVQvYkPu4+SMAGPulbdrA 
-Bu8gn5UZsqp8GvU2zPSY8/OFPxx+4wf8UNmBCzfge6oMqvQt8LUrDkTdYXQZUwEM -27RT6cqh6rJJ5iMZ3EOd3dbhyorkCdTEvRGcifIHDtm6FxTnled0f9dq6uj/YSm8 -l24OxYx2q3tl8h/VM6ZqJAcr7zbJv92tVoltzMq5OmfUq/ZufORARySCr7PV0s04 -7K8/bF/6ezOLDmKUl73Z7kYk5QlgUCUSBQAal8N2/qP+BNFi90kIki05ARC16S00 -IBmyhzTDAgMBAAECggEAVOdiElvLjyX6xeoC00YU6hxOIMdNtHU2HMamwtDV01UD -38mMQ9KjrQelYt4n34drLrHe2IZw75/5J4JzagJrmUY47psHBwaDXItuZRokeJaw -zhLYTEs7OcKRtV+a5WOspUrdzi33aQoFb67zZG3qkpsZyFXrdBV+/fy/Iv+MCvLH -xR0jQ5mzE3cw20R7S4nddChBA/y8oKGOo6QRf2SznC1jL/+yolHvJPEn1v8AUxYm -BMPHxj1O0c4M4IxnJQ3Y5Jy9OaFMyMsFlF1hVhc/3LDDxDyOuBsVsFDicojyrRea -GKngIke0yezy7Wo4NUcp8YQhafonpWVsSJJdOUotcQKBgQD0rihFBXVtcG1d/Vy7 -FvLHrmccD56JNV744LSn2CDM7W1IulNbDUZINdCFqL91u5LpxozeE1FPY1nhwncJ -N7V7XYCaSLCuV1YJzRmUCjnzk2RyopGpzWog3f9uUFGgrk1HGbNAv99k/REya6Iu -IRSkuQhaJOj3bRXzonh0K4GjewKBgQDXvamtCioOUMSP8vq919YMkBw7F+z/fr0p -pamO8HL9eewAUg6N92JQ9kobSo/GptdmdHIjs8LqnS5C3H13GX5Qlf5GskOlCpla -V55ElaSp0gvKwWE168U7gQH4etPQAXXJrOGFaGbPj9W81hTUud7HVE88KYdfWTBo -I7TuE25tWQKBgBRjcr2Vn9xXsvVTCGgamG5lLPhcoNREGz7X0pXt34XT/vhBdnKu -331i5pZMom+YCrzqK5DRwUPBPpseTjb5amj2OKIijn5ojqXQbmI0m/GdBZC71TF2 -CXLlrMQvcy3VeGEFVjd+BYpvwAAYkfIQFZ1IQdbpHnSHpX2guzLK8UmDAoGBANUy -PIcf0EetUVHfkCIjNQfdMcjD8BTcLhsF9vWmcDxFTA9VB8ULf0D64mjt2f85yQsa -b+EQN8KZ6alxMxuLOeRxFYLPj0F9o+Y/R8wHBV48kCKhz2r1v0b6SfQ/jSm1B61x -BrxLW64qOdIOzS8bLyhUDKkrcPesr8V548aRtUKhAoGBAKlNJFd8BCGKD9Td+3dE -oP1iHTX5XZ+cQIqL0e+GMQlK4HnQP566DFZU5/GHNNAfmyxd5iSRwhTqPMHRAmOb -pqQwsyufx0dFeIBxeSO3Z6jW5h2sl4nBipZpw9bzv6EBL1xRr0SfMNZzdnf4JFzc -0htGo/VO93Z2pv8w7uGUz1nN ------END PRIVATE KEY----- diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index 681ca0c712bb2..1720b21b45d68 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -41,7 +41,9 @@ apply plugin: 'opensearch.standalone-test' dependencies { testImplementation 'com.google.jimfs:jimfs:1.3.0' - testImplementation project(':distribution:tools:plugin-cli') + testImplementation(project(':distribution:tools:plugin-cli')) { + exclude group: 'org.bouncycastle' + } } // TODO: give each evil test its own fresh JVM for more isolation. 
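For readers following the certificate changes above: the affected tests now build their TLS material in memory (see the reactor-netty4 test and the `KeyStoreUtils` helper added later in this patch) instead of loading checked-in PEM files. A minimal sketch of that pattern follows; the `TLSv1.2` protocol string and the null trust-manager/SecureRandom arguments are illustrative assumptions, not part of the patch itself:

```java
import java.security.KeyStore;

import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;

import org.opensearch.test.KeyStoreUtils;

public final class InMemoryTlsSketch {
    public static SSLContext serverContext() throws Exception {
        // Self-signed credential generated at test runtime, so no certificate
        // or key files need to live in the test resources.
        KeyStore keyStore = KeyStoreUtils.createServerKeyStore();

        // PKIX is the standard JDK X.509 key manager algorithm, as used by
        // the reactor-netty4 test above.
        KeyManagerFactory kmf = KeyManagerFactory.getInstance("PKIX");
        kmf.init(keyStore, KeyStoreUtils.KEYSTORE_PASSWORD);

        SSLContext ctx = SSLContext.getInstance("TLSv1.2");
        // Null trust managers and SecureRandom fall back to the JDK defaults.
        ctx.init(kmf.getKeyManagers(), null, null);
        return ctx;
    }
}
```

Generating the material per run avoids both expiring fixtures and hard-coded weak passphrases, which appears to be what the fixture churn above is cleaning up.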
diff --git a/server/src/main/resources/org/opensearch/bootstrap/test.policy b/server/src/main/resources/org/opensearch/bootstrap/test.policy index e0a183b7eac88..9e1d5cebffc0e 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test.policy @@ -7,10 +7,29 @@ */ grant { - // allow to use JVM tooling (Java Compiler) in tests for annotation processing + // allow to use JVM tooling (Java Compiler) in tests for annotation processing permission java.io.FilePermission "${java.home}/lib/*", "read"; permission java.io.FilePermission "${java.home}/lib/modules/*", "read"; permission java.lang.RuntimePermission "accessSystemModules"; permission java.lang.RuntimePermission "accessDeclaredMembers"; permission java.lang.RuntimePermission "accessClassInPackage.*"; + + // security + permission java.io.FilePermission "${java.home}/lib/security/cacerts", "read"; + permission java.io.FilePermission "${java.home}/lib/security/jssecacerts", "read"; + permission java.lang.RuntimePermission "accessClassInPackage.sun.security.internal.spec"; + permission java.lang.RuntimePermission "closeClassLoader"; + permission java.lang.RuntimePermission "getProtectionDomain"; + permission java.security.SecurityPermission "getProperty.jdk.certpath.disabledAlgorithms"; + permission java.security.SecurityPermission "getProperty.jdk.tls.disabledAlgorithms"; + permission java.security.SecurityPermission "getProperty.jdk.tls.server.defaultDHEParameters"; + permission java.security.SecurityPermission "getProperty.keystore.type.compat"; + permission java.security.SecurityPermission "getProperty.org.bouncycastle.*"; + permission java.security.SecurityPermission "putProviderProperty.BC"; + permission java.security.SecurityPermission "removeProvider.SunJCE"; + permission java.security.SecurityPermission "removeProviderProperty.BC"; + permission java.util.PropertyPermission "java.runtime.name", "read"; + permission org.bouncycastle.crypto.CryptoServicesPermission "defaultRandomConfig"; + permission org.bouncycastle.crypto.CryptoServicesPermission "exportPrivateKey"; + permission org.bouncycastle.crypto.CryptoServicesPermission "exportSecretKey"; }; diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/kdc.conf.template b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/kdc.conf.template index 22909ddf60013..69be28f4548c3 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/kdc.conf.template +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/kdc.conf.template @@ -16,8 +16,8 @@ # under the License. [kdcdefaults] - kdc_listen = 88 - kdc_tcp_listen = 88 + kdc_ports = 88 + kdc_tcp_ports = 88 [realms] ${REALM_NAME} = { @@ -25,8 +25,7 @@ max_life = 12h 0m 0s max_renewable_life = 7d 0h 0m 0s master_key_type = aes256-cts - # remove aes256-cts:normal since unlimited strength policy needs installed for java to use it. 
- supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal + supported_enctypes = aes256-cts-hmac-sha1-96:normal aes128-cts-hmac-sha1-96:normal } [logging] diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template index 207fe939fb7a5..a87c5b50d5cf3 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template @@ -33,18 +33,15 @@ dns_canonicalize_hostname = false dns_lookup_kdc = false dns_lookup_realm = false - dns_uri_lookup = false forwardable = true ignore_acceptor_hostname = true rdns = false - default_tgs_enctypes = rc4-hmac - default_tkt_enctypes = rc4-hmac - permitted_enctypes = rc4-hmac + default_tgs_enctypes = aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 + default_tkt_enctypes = aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 + permitted_enctypes = aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 # udp_preference_limit = 1 - kdc_timeout = 3000 canonicalize = true - # See please https://seanjmullan.org/blog/2021/09/14/jdk17 (deprecate 3DES and RC4 in Kerberos) - allow_weak_crypto = true + allow_weak_crypto = false [realms] ${REALM_NAME} = { @@ -52,6 +49,8 @@ kdc = 127.0.0.1:${MAPPED_PORT} admin_server = ${KDC_NAME}:749 default_domain = ${BUILD_ZONE} + master_key_type = aes256-cts + supported_enctypes = aes256-cts-hmac-sha1-96:normal aes128-cts-hmac-sha1-96:normal } [domain_realm] diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 84a536fdf62c8..47addd36318a4 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -49,6 +49,9 @@ dependencies { api "org.mockito:mockito-core:${versions.mockito}" api "net.bytebuddy:byte-buddy:${versions.bytebuddy}" api "org.objenesis:objenesis:${versions.objenesis}" + api "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcpkix-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcutil-jdk18on:${versions.bouncycastle}" annotationProcessor "org.apache.logging.log4j:log4j-core:${versions.log4j}" } diff --git a/test/framework/licenses/bcpkix-jdk18on-1.78.jar.sha1 b/test/framework/licenses/bcpkix-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..385a9d930eede --- /dev/null +++ b/test/framework/licenses/bcpkix-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ +dd61bcdb87678451dd42d42e267979bd4b4451a1 \ No newline at end of file diff --git a/test/framework/licenses/bcprov-jdk18on-1.78.jar.sha1 b/test/framework/licenses/bcprov-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..47fb5fd5e5f5d --- /dev/null +++ b/test/framework/licenses/bcprov-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ +619aafb92dc0b4c6cc4cf86c487ca48ee2d67a8e \ No newline at end of file diff --git a/test/framework/licenses/bouncycastle-LICENSE.txt b/test/framework/licenses/bouncycastle-LICENSE.txt new file mode 100644 index 0000000000000..5c7c14696849d --- /dev/null +++ b/test/framework/licenses/bouncycastle-LICENSE.txt @@ -0,0 +1,14 @@ +Copyright (c) 2000 - 2023 The Legion of the Bouncy Castle Inc. 
(https://www.bouncycastle.org) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/test/framework/licenses/bouncycastle-NOTICE.txt b/test/framework/licenses/bouncycastle-NOTICE.txt new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/test/framework/licenses/bouncycastle-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/test/framework/src/main/java/org/opensearch/test/KeyStoreUtils.java b/test/framework/src/main/java/org/opensearch/test/KeyStoreUtils.java new file mode 100644 index 0000000000000..c90b2b872f8ba --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/KeyStoreUtils.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import org.bouncycastle.cert.X509CertificateHolder; +import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; +import org.bouncycastle.cert.jcajce.JcaX509v1CertificateBuilder; +import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; + +import javax.security.auth.x500.X500Principal; +import javax.security.auth.x500.X500PrivateCredential; + +import java.math.BigInteger; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.KeyStore; +import java.security.cert.X509Certificate; +import java.util.Date; + +public class KeyStoreUtils { + + public static final char[] KEYSTORE_PASSWORD = "keystore_password".toCharArray(); + + public static KeyStore createServerKeyStore() throws Exception { + var serverCred = createCredential(); + var keyStore = KeyStore.getInstance("JKS"); + keyStore.load(null, null); + keyStore.setKeyEntry( + serverCred.getAlias(), + serverCred.getPrivateKey(), + KEYSTORE_PASSWORD, + new X509Certificate[] { serverCred.getCertificate() } + ); + return keyStore; + } + + private static X500PrivateCredential createCredential() throws Exception { + var keyPairGenerator = KeyPairGenerator.getInstance("RSA"); + keyPairGenerator.initialize(2048); + var keyPair = keyPairGenerator.generateKeyPair(); + var rootCert = new JcaX509CertificateConverter().getCertificate(generateCert(keyPair)); + return new X500PrivateCredential(rootCert, keyPair.getPrivate(), "server-ca"); + } + + private static X509CertificateHolder generateCert(KeyPair pair) throws Exception { + var baseTime = System.currentTimeMillis(); + // 10 years in milliseconds + var validityPeriod = 10L * 365 * 24 * 60 * 60 * 1000; + + var certBuilder = new JcaX509v1CertificateBuilder( + new X500Principal("CN=Test CA Certificate"), + BigInteger.valueOf(1), + new Date(baseTime), + new Date(baseTime + validityPeriod), + new X500Principal("CN=Test CA Certificate"), + pair.getPublic() + ); + var signer = new JcaContentSignerBuilder("SHA256withRSA").build(pair.getPrivate()); + return certBuilder.build(signer); + } + +} From ee7fbbd226b2be81128eaafe19aad0a39244368c Mon Sep 17 00:00:00 2001 From: Vinay Krishna Pudyodu Date: Thu, 27 Feb 2025 09:42:10 -0800 Subject: [PATCH 27/48] Implemented computation of segment replication stats at shard level (#17055) * Implemented computation of segment replication stats at shard level The method implemented here computes the segment replication stats at the shard level, instead of relying on the primary shard to compute stats based on reports from its replicas. 
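In outline, the change inverts who does the work: the primary no longer aggregates reports from replicas; instead each replica consults a node-local provider (wired to `SegmentReplicator` in the diffs below). A condensed sketch of the resulting shape, using the `ReplicationStats` and `ShardId` types this patch touches; the wrapper class here is illustrative, and the real logic lives in `IndexShard#getReplicationStats` below:

```java
import java.util.function.Function;

import org.opensearch.core.index.shard.ShardId;
import org.opensearch.index.ReplicationStats;

// Sketch of the provider-based lookup: primaries report empty stats,
// replicas ask the node-local provider for their own lag.
class ShardReplicationStatsSketch {
    private final Function<ShardId, ReplicationStats> statsProvider;
    private final boolean primary;
    private final ShardId shardId;

    ShardReplicationStatsSketch(Function<ShardId, ReplicationStats> statsProvider, boolean primary, ShardId shardId) {
        this.statsProvider = statsProvider;
        this.primary = primary;
        this.shardId = shardId;
    }

    ReplicationStats replicationStats() {
        // Mirrors the new IndexShard logic: only non-primaries report lag.
        return primary ? ReplicationStats.empty() : statsProvider.apply(shardId);
    }
}
```

This is exactly what the SegmentReplicationStatsIT changes below exercise: primaries now assert zero lag while replicas assert positive bytes-behind values.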
Signed-off-by: Vinay Krishna Pudyodu * Updated style checks in the test Signed-off-by: Vinay Krishna Pudyodu * Updated changelog Signed-off-by: Vinay Krishna Pudyodu * fixed style issues Signed-off-by: Vinay Krishna Pudyodu * Fix the failing integration test Signed-off-by: Vinay Krishna Pudyodu * Fix stylecheck Signed-off-by: Vinay Krishna Pudyodu * Fixed the comments for the initial revision Signed-off-by: Vinay Krishna Pudyodu * Updated to use System.nanoTime() for lag calculation Signed-off-by: Vinay Krishna Pudyodu * Fixed the integration test for node stats Signed-off-by: Vinay Krishna Pudyodu * Modified the version in the ReplicationCheckpoint for backward compatibility Signed-off-by: Vinay Krishna Pudyodu * Added precomputation logic for the stats calculation Signed-off-by: Vinay Krishna Pudyodu * Removed unwanted lines Signed-off-by: Vinay Krishna Pudyodu * Clean up the maps when index closed Signed-off-by: Vinay Krishna Pudyodu * Added a null check for the indexshard checkpoint Signed-off-by: Vinay Krishna Pudyodu * fix style checks Signed-off-by: Vinay Krishna Pudyodu * Updated version and added bwc for RemoteSegmentMetadata Signed-off-by: Vinay Krishna Pudyodu * Upated the javadoc comments Signed-off-by: Vinay Krishna Pudyodu * Address comments PR Signed-off-by: Vinay Krishna Pudyodu * Removed the latestReceivedCheckpoint map from SegmentReplicationTargetService Signed-off-by: Vinay Krishna Pudyodu * Added granular locks for the concurrency of stats methods Signed-off-by: Vinay Krishna Pudyodu * Style check fixes Signed-off-by: Vinay Krishna Pudyodu * Changes to maintain atomicity Signed-off-by: Vinay Krishna Pudyodu * spotlessApply Signed-off-by: Vinay Krishna Pudyodu * removed querying the remotestore when replication is in progress Signed-off-by: Vinay Krishna Pudyodu * spotlessApply Signed-off-by: Vinay Krishna Pudyodu --------- Signed-off-by: Vinay Krishna Pudyodu --- CHANGELOG.md | 1 + .../opensearch/index/shard/IndexShardIT.java | 5 +- .../SegmentReplicationStatsIT.java | 16 +- .../io/IndexIOStreamHandlerFactory.java | 25 +++ .../io/VersionedCodecStreamWrapper.java | 23 ++- .../org/opensearch/index/IndexModule.java | 10 +- .../org/opensearch/index/IndexService.java | 11 +- .../opensearch/index/ReplicationStats.java | 4 + .../opensearch/index/shard/IndexShard.java | 20 +- .../store/RemoteSegmentStoreDirectory.java | 5 +- .../metadata/RemoteSegmentMetadata.java | 30 ++- .../RemoteSegmentMetadataHandler.java | 9 +- .../RemoteSegmentMetadataHandlerFactory.java | 44 +++++ .../transfer/TranslogTransferManager.java | 3 +- ...ranslogTransferMetadataHandlerFactory.java | 37 ++++ .../opensearch/indices/IndicesService.java | 10 +- .../RemoteStoreReplicationSource.java | 1 - .../replication/SegmentReplicationTarget.java | 5 +- .../SegmentReplicationTargetService.java | 20 +- .../replication/SegmentReplicator.java | 149 ++++++++++++++- .../checkpoint/ReplicationCheckpoint.java | 39 +++- .../main/java/org/opensearch/node/Node.java | 3 +- .../io/VersionedCodecStreamWrapperTests.java | 11 +- .../opensearch/index/IndexModuleTests.java | 3 +- .../index/seqno/ReplicationTrackerTests.java | 15 +- .../RemoteSegmentStoreDirectoryTests.java | 9 +- ...oteSegmentMetadataHandlerFactoryTests.java | 44 +++++ .../RemoteSegmentMetadataHandlerTests.java | 2 +- ...ogTransferMetadataHandlerFactoryTests.java | 42 +++++ .../SegmentReplicationTargetServiceTests.java | 12 +- .../SegmentReplicationTargetTests.java | 15 +- .../replication/SegmentReplicatorTests.java | 174 +++++++++++++++++- 
.../replication/common/CopyStateTests.java | 3 +- .../index/shard/IndexShardTestCase.java | 7 +- 34 files changed, 706 insertions(+), 101 deletions(-) create mode 100644 server/src/main/java/org/opensearch/common/io/IndexIOStreamHandlerFactory.java create mode 100644 server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerFactory.java create mode 100644 server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactory.java create mode 100644 server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerFactoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactoryTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index e4779231977b9..6aa18ce0064ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/)) - Added ability to retrieve value from DocValues in a flat_object filed([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802)) - Improve performace of NumericTermAggregation by avoiding unnecessary sorting([#17252](https://github.com/opensearch-project/OpenSearch/pull/17252)) +- Implemented computation of segment replication stats at shard level ([#17055](https://github.com/opensearch-project/OpenSearch/pull/17055)) - [Rule Based Auto-tagging] Add in-memory attribute value store ([#17342](https://github.com/opensearch-project/OpenSearch/pull/17342)) ### Dependencies diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 7fd219a3dd9dc..2d0918ff6e89a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -114,6 +114,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Stream; @@ -136,6 +137,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength; +import static org.mockito.Mockito.mock; public class IndexShardIT extends OpenSearchSingleNodeTestCase { @@ -716,7 +718,8 @@ public static final IndexShard newIndexShard( null, DefaultRemoteStoreSettings.INSTANCE, false, - IndexShardTestUtils.getFakeDiscoveryNodes(initializingShardRouting) + IndexShardTestUtils.getFakeDiscoveryNodes(initializingShardRouting), + mock(Function.class) ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java index 89aef6f0be1a6..5d69799e32647 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java @@ -404,19 +404,17 @@ public void testSegmentReplicationNodeAndIndexStats() throws 
Exception {
         for (NodeStats nodeStats : nodesStatsResponse.getNodes()) {
             ReplicationStats replicationStats = nodeStats.getIndices().getSegments().getReplicationStats();
-            // primary node - should hold replication statistics
+            // primary node - does not hold any replication statistics
             if (nodeStats.getNode().getName().equals(primaryNode)) {
+                assertTrue(replicationStats.getMaxBytesBehind() == 0);
+                assertTrue(replicationStats.getTotalBytesBehind() == 0);
+                assertTrue(replicationStats.getMaxReplicationLag() == 0);
+            }
+            // replica nodes - should hold replication statistics
+            if (nodeStats.getNode().getName().equals(replicaNode1) || nodeStats.getNode().getName().equals(replicaNode2)) {
                 assertTrue(replicationStats.getMaxBytesBehind() > 0);
                 assertTrue(replicationStats.getTotalBytesBehind() > 0);
                 assertTrue(replicationStats.getMaxReplicationLag() > 0);
-                // 2 replicas so total bytes should be double of max
-                assertEquals(replicationStats.getMaxBytesBehind() * 2, replicationStats.getTotalBytesBehind());
-            }
-            // replica nodes - should hold empty replication statistics
-            if (nodeStats.getNode().getName().equals(replicaNode1) || nodeStats.getNode().getName().equals(replicaNode2)) {
-                assertEquals(0, replicationStats.getMaxBytesBehind());
-                assertEquals(0, replicationStats.getTotalBytesBehind());
-                assertEquals(0, replicationStats.getMaxReplicationLag());
             }
         }
         // get replication statistics at index level
diff --git a/server/src/main/java/org/opensearch/common/io/IndexIOStreamHandlerFactory.java b/server/src/main/java/org/opensearch/common/io/IndexIOStreamHandlerFactory.java
new file mode 100644
index 0000000000000..a4ad161d0ced3
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/io/IndexIOStreamHandlerFactory.java
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.common.io; + +/** + * Interface for factory to provide handler implementation for type {@link T} + * @param The type of content to be read/written to stream + * + * @opensearch.internal + */ +public interface IndexIOStreamHandlerFactory { + + /** + * Implements logic to provide handler based on the stream versions + * @param version stream version + * @return Handler for reading/writing content streams to/from - {@link T} + */ + IndexIOStreamHandler getHandler(int version); +} diff --git a/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java b/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java index 8089d354a2480..b62ae1f1d3956 100644 --- a/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java +++ b/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java @@ -28,18 +28,25 @@ public class VersionedCodecStreamWrapper { private static final Logger logger = LogManager.getLogger(VersionedCodecStreamWrapper.class); - // TODO This can be updated to hold a streamReadWriteHandlerFactory and get relevant handler based on the stream versions - private final IndexIOStreamHandler indexIOStreamHandler; + private final IndexIOStreamHandlerFactory indexIOStreamHandlerFactory; + private final int minVersion; private final int currentVersion; private final String codec; /** - * @param indexIOStreamHandler handler to read/write stream from T + * @param indexIOStreamHandlerFactory factory for providing handler to read/write stream from T + * @param minVersion earliest supported version of the stream * @param currentVersion latest supported version of the stream * @param codec: stream codec */ - public VersionedCodecStreamWrapper(IndexIOStreamHandler indexIOStreamHandler, int currentVersion, String codec) { - this.indexIOStreamHandler = indexIOStreamHandler; + public VersionedCodecStreamWrapper( + IndexIOStreamHandlerFactory indexIOStreamHandlerFactory, + int minVersion, + int currentVersion, + String codec + ) { + this.indexIOStreamHandlerFactory = indexIOStreamHandlerFactory; + this.minVersion = minVersion; this.currentVersion = currentVersion; this.codec = codec; } @@ -87,7 +94,7 @@ public void writeStream(IndexOutput indexOutput, T content) throws IOException { */ private int checkHeader(IndexInput indexInput) throws IOException { // TODO Once versioning strategy is decided we'll add support for min/max supported versions - return CodecUtil.checkHeader(indexInput, this.codec, this.currentVersion, this.currentVersion); + return CodecUtil.checkHeader(indexInput, this.codec, minVersion, this.currentVersion); } /** @@ -120,8 +127,6 @@ private void writeFooter(IndexOutput indexOutput) throws IOException { * @param version stream content version */ private IndexIOStreamHandler getHandlerForVersion(int version) { - // TODO implement factory and pick relevant handler based on version. 
- // It should also take into account min and max supported versions - return this.indexIOStreamHandler; + return this.indexIOStreamHandlerFactory.getHandler(version); } } diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 52dd92f31d70b..7016ddb8e59b8 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -57,6 +57,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; @@ -652,7 +653,8 @@ public IndexService newIndexService( clusterDefaultRefreshIntervalSupplier, recoverySettings, remoteStoreSettings, - (s) -> {} + (s) -> {}, + shardId -> ReplicationStats.empty() ); } @@ -678,7 +680,8 @@ public IndexService newIndexService( Supplier clusterDefaultRefreshIntervalSupplier, RecoverySettings recoverySettings, RemoteStoreSettings remoteStoreSettings, - Consumer replicator + Consumer replicator, + Function segmentReplicationStatsProvider ) throws IOException { final IndexEventListener eventListener = freeze(); Function> readerWrapperFactory = indexReaderWrapper @@ -740,7 +743,8 @@ public IndexService newIndexService( remoteStoreSettings, fileCache, compositeIndexSettings, - replicator + replicator, + segmentReplicationStatsProvider ); success = true; return indexService; diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 72d723c7e1199..e265ce3590121 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -197,6 +197,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final FileCache fileCache; private final CompositeIndexSettings compositeIndexSettings; private final Consumer replicator; + private final Function segmentReplicationStatsProvider; public IndexService( IndexSettings indexSettings, @@ -235,7 +236,8 @@ public IndexService( RemoteStoreSettings remoteStoreSettings, FileCache fileCache, CompositeIndexSettings compositeIndexSettings, - Consumer replicator + Consumer replicator, + Function segmentReplicationStatsProvider ) { super(indexSettings); this.allowExpensiveQueries = allowExpensiveQueries; @@ -322,6 +324,7 @@ public IndexService( this.compositeIndexSettings = compositeIndexSettings; this.fileCache = fileCache; this.replicator = replicator; + this.segmentReplicationStatsProvider = segmentReplicationStatsProvider; updateFsyncTaskIfNecessary(); } @@ -398,7 +401,8 @@ public IndexService( remoteStoreSettings, null, null, - s -> {} + s -> {}, + (shardId) -> ReplicationStats.empty() ); } @@ -694,7 +698,8 @@ protected void closeInternal() { recoverySettings, remoteStoreSettings, seedRemote, - discoveryNodes + discoveryNodes, + segmentReplicationStatsProvider ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/opensearch/index/ReplicationStats.java b/server/src/main/java/org/opensearch/index/ReplicationStats.java index 8987a492e9a90..22628e86d309f 100644 --- 
a/server/src/main/java/org/opensearch/index/ReplicationStats.java +++ b/server/src/main/java/org/opensearch/index/ReplicationStats.java @@ -42,6 +42,10 @@ public ReplicationStats(StreamInput in) throws IOException { this.maxReplicationLag = in.readVLong(); } + public static ReplicationStats empty() { + return new ReplicationStats(); + } + public ReplicationStats() { } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index df841dac4cf8e..f8ad3fc8cf866 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -361,6 +361,7 @@ Runnable getGlobalCheckpointSyncer() { */ private final ShardMigrationState shardMigrationState; private DiscoveryNodes discoveryNodes; + private final Function segmentReplicationStatsProvider; public IndexShard( final ShardRouting shardRouting, @@ -391,7 +392,8 @@ public IndexShard( final RecoverySettings recoverySettings, final RemoteStoreSettings remoteStoreSettings, boolean seedRemote, - final DiscoveryNodes discoveryNodes + final DiscoveryNodes discoveryNodes, + final Function segmentReplicationStatsProvider ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -493,6 +495,7 @@ public boolean shouldCache(Query query) { this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings); this.shardMigrationState = getShardMigrationState(indexSettings, seedRemote); this.discoveryNodes = discoveryNodes; + this.segmentReplicationStatsProvider = segmentReplicationStatsProvider; } public ThreadPool getThreadPool() { @@ -3233,17 +3236,10 @@ public Set getReplicationStatsForTrackedReplicas() } public ReplicationStats getReplicationStats() { - if (indexSettings.isSegRepEnabledOrRemoteNode() && routingEntry().primary()) { - final Set stats = getReplicationStatsForTrackedReplicas(); - long maxBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).max().orElse(0L); - long totalBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).sum(); - long maxReplicationLag = stats.stream() - .mapToLong(SegmentReplicationShardStats::getCurrentReplicationLagMillis) - .max() - .orElse(0L); - return new ReplicationStats(maxBytesBehind, totalBytesBehind, maxReplicationLag); - } - return new ReplicationStats(); + if (indexSettings.isSegRepEnabledOrRemoteNode() && !routingEntry().primary()) { + return segmentReplicationStatsProvider.apply(shardId); + } + return ReplicationStats.empty(); } /** diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 46a90da2a18b6..c18902b69d23c 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -38,7 +38,7 @@ import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; -import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandlerFactory; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; 
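Stepping back to the VersionedCodecStreamWrapper change earlier in this patch: reads are now version-driven end to end. A condensed sketch of the read path, using the names from this patch (readStream's full body is not shown in this excerpt, so treat this as an approximation):

    int version = CodecUtil.checkHeader(indexInput, codec, minVersion, currentVersion); // rejects versions outside [min, current]
    IndexIOStreamHandler<T> handler = indexIOStreamHandlerFactory.getHandler(version);  // factory resolves the version-specific handler
    T content = handler.readContent(indexInput);                                        // handler parses only that version's fields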
import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.threadpool.ThreadPool; @@ -104,7 +104,8 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private Map segmentsUploadedToRemoteStore; private static final VersionedCodecStreamWrapper metadataStreamWrapper = new VersionedCodecStreamWrapper<>( - new RemoteSegmentMetadataHandler(), + new RemoteSegmentMetadataHandlerFactory(), + RemoteSegmentMetadata.VERSION_ONE, RemoteSegmentMetadata.CURRENT_VERSION, RemoteSegmentMetadata.METADATA_CODEC ); diff --git a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java index 41a145273e8ef..463e08918b3f7 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java +++ b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java @@ -30,10 +30,15 @@ */ @PublicApi(since = "2.6.0") public class RemoteSegmentMetadata { + + public static final int VERSION_ONE = 1; + + public static final int VERSION_TWO = 2; + /** * Latest supported version of metadata */ - public static final int CURRENT_VERSION = 1; + public static final int CURRENT_VERSION = VERSION_TWO; /** * Metadata codec */ @@ -106,6 +111,11 @@ public static Map f ); } + /** + * Write always writes with the latest version of the RemoteSegmentMetadata + * @param out file output stream which will store stream content + * @throws IOException in case there is a problem writing the file + */ public void write(IndexOutput out) throws IOException { out.writeMapOfStrings(toMapOfStrings()); writeCheckpointToIndexOutput(replicationCheckpoint, out); @@ -113,11 +123,18 @@ public void write(IndexOutput out) throws IOException { out.writeBytes(segmentInfosBytes, segmentInfosBytes.length); } - public static RemoteSegmentMetadata read(IndexInput indexInput) throws IOException { + /** + * Read can happen in the upgraded version of replica which needs to support all versions of RemoteSegmentMetadata + * @param indexInput file input stream + * @param version version of the RemoteSegmentMetadata + * @return {@code RemoteSegmentMetadata} + * @throws IOException in case there is a problem reading from the file input stream + */ + public static RemoteSegmentMetadata read(IndexInput indexInput, int version) throws IOException { Map metadata = indexInput.readMapOfStrings(); final Map uploadedSegmentMetadataMap = RemoteSegmentMetadata .fromMapOfStrings(metadata); - ReplicationCheckpoint replicationCheckpoint = readCheckpointFromIndexInput(indexInput, uploadedSegmentMetadataMap); + ReplicationCheckpoint replicationCheckpoint = readCheckpointFromIndexInput(indexInput, uploadedSegmentMetadataMap, version); int byteArraySize = (int) indexInput.readLong(); byte[] segmentInfosBytes = new byte[byteArraySize]; indexInput.readBytes(segmentInfosBytes, 0, byteArraySize); @@ -136,11 +153,13 @@ public static void writeCheckpointToIndexOutput(ReplicationCheckpoint replicatio out.writeLong(replicationCheckpoint.getSegmentInfosVersion()); out.writeLong(replicationCheckpoint.getLength()); out.writeString(replicationCheckpoint.getCodec()); + out.writeLong(replicationCheckpoint.getCreatedTimeStamp()); } private static ReplicationCheckpoint readCheckpointFromIndexInput( IndexInput in, - Map uploadedSegmentMetadataMap + Map uploadedSegmentMetadataMap, + int version ) throws IOException { return new ReplicationCheckpoint( new 
ShardId(new Index(in.readString(), in.readString()), in.readVInt()), @@ -149,7 +168,8 @@ private static ReplicationCheckpoint readCheckpointFromIndexInput( in.readLong(), in.readLong(), in.readString(), - toStoreFileMetadata(uploadedSegmentMetadataMap) + toStoreFileMetadata(uploadedSegmentMetadataMap), + version >= VERSION_TWO ? in.readLong() : 0 ); } diff --git a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandler.java b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandler.java index 3077d8c76ddae..9fa76b38d2b07 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandler.java +++ b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandler.java @@ -20,6 +20,13 @@ * @opensearch.internal */ public class RemoteSegmentMetadataHandler implements IndexIOStreamHandler { + + private final int version; + + public RemoteSegmentMetadataHandler(int version) { + this.version = version; + } + /** * Reads metadata content from metadata file input stream and parsed into {@link RemoteSegmentMetadata} * @param indexInput metadata file input stream with {@link IndexInput#getFilePointer()} pointing to metadata content @@ -27,7 +34,7 @@ public class RemoteSegmentMetadataHandler implements IndexIOStreamHandler { + private final AtomicReference> handlerRef = new AtomicReference<>(); + + @Override + public IndexIOStreamHandler getHandler(int version) { + IndexIOStreamHandler current = handlerRef.get(); + if (current != null) { + return current; + } + + IndexIOStreamHandler newHandler = createHandler(version); + handlerRef.compareAndSet(null, newHandler); + return handlerRef.get(); + } + + private IndexIOStreamHandler createHandler(int version) { + return switch (version) { + case RemoteSegmentMetadata.VERSION_ONE -> new RemoteSegmentMetadataHandler(RemoteSegmentMetadata.VERSION_ONE); + case RemoteSegmentMetadata.VERSION_TWO -> new RemoteSegmentMetadataHandler(RemoteSegmentMetadata.VERSION_TWO); + default -> throw new IllegalArgumentException("Unsupported RemoteSegmentMetadata version: " + version); + }; + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 1e621d6cb7688..d410f473c71f1 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -75,7 +75,8 @@ public class TranslogTransferManager { private final Logger logger; private static final VersionedCodecStreamWrapper metadataStreamWrapper = new VersionedCodecStreamWrapper<>( - new TranslogTransferMetadataHandler(), + new TranslogTransferMetadataHandlerFactory(), + TranslogTransferMetadata.CURRENT_VERSION, TranslogTransferMetadata.CURRENT_VERSION, TranslogTransferMetadata.METADATA_CODEC ); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactory.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactory.java new file mode 100644 index 0000000000000..8f8e3e816d665 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactory.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * 
this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog.transfer; + +import org.opensearch.common.io.IndexIOStreamHandler; +import org.opensearch.common.io.IndexIOStreamHandlerFactory; + +import java.util.concurrent.ConcurrentHashMap; + +/** + * {@link TranslogTransferMetadataHandlerFactory} is a factory class to create {@link TranslogTransferMetadataHandler} + * instances based on the {@link TranslogTransferMetadata} version + * + * @opensearch.internal + */ +public class TranslogTransferMetadataHandlerFactory implements IndexIOStreamHandlerFactory { + + private final ConcurrentHashMap> handlers = new ConcurrentHashMap<>(); + + @Override + public IndexIOStreamHandler getHandler(int version) { + return handlers.computeIfAbsent(version, this::createHandler); + } + + private IndexIOStreamHandler createHandler(int version) { + return switch (version) { + case TranslogTransferMetadata.CURRENT_VERSION -> new TranslogTransferMetadataHandler(); + default -> throw new IllegalArgumentException("Unsupported TranslogTransferMetadata version: " + version); + }; + } +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index d679240955a07..527c2c23ba6b1 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -105,6 +105,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.IngestionConsumerFactory; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.cache.request.ShardRequestCache; import org.opensearch.index.compositeindex.CompositeIndexSettings; @@ -365,6 +366,7 @@ public class IndicesService extends AbstractLifecycleComponent private final FileCache fileCache; private final CompositeIndexSettings compositeIndexSettings; private final Consumer replicator; + private final Function segmentReplicationStatsProvider; private volatile int maxSizeInRequestCache; @Override @@ -404,7 +406,8 @@ public IndicesService( RemoteStoreSettings remoteStoreSettings, FileCache fileCache, CompositeIndexSettings compositeIndexSettings, - Consumer replicator + Consumer replicator, + Function segmentReplicationStatsProvider ) { this.settings = settings; this.threadPool = threadPool; @@ -515,6 +518,7 @@ protected void closeInternal() { this.compositeIndexSettings = compositeIndexSettings; this.fileCache = fileCache; this.replicator = replicator; + this.segmentReplicationStatsProvider = segmentReplicationStatsProvider; this.maxSizeInRequestCache = INDICES_REQUEST_CACHE_MAX_SIZE_ALLOWED_IN_CACHE_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(INDICES_REQUEST_CACHE_MAX_SIZE_ALLOWED_IN_CACHE_SETTING, this::setMaxSizeInRequestCache); @@ -581,6 +585,7 @@ public IndicesService( remoteStoreSettings, null, null, + null, null ); } @@ -998,7 +1003,8 @@ private synchronized IndexService createIndexService( this::getClusterDefaultRefreshInterval, this.recoverySettings, this.remoteStoreSettings, - replicator + replicator, + segmentReplicationStatsProvider ); } diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java index 
b06b3e0497cf7..30d9c362b6269 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java @@ -110,7 +110,6 @@ public void getSegmentFiles( return; } logger.debug("Downloading segment files from remote store {}", filesToFetch); - if (remoteMetadataExists()) { final Directory storeDirectory = indexShard.store().directory(); final Collection directoryFiles = List.of(storeDirectory.listAll()); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 7131b49a41834..64bd73ebb4611 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -39,6 +39,7 @@ import java.util.List; import java.util.Locale; import java.util.Set; +import java.util.function.BiConsumer; import java.util.stream.Collectors; /** @@ -161,7 +162,7 @@ public void writeFileChunk( * * @param listener {@link ActionListener} listener. */ - public void startReplication(ActionListener listener) { + public void startReplication(ActionListener listener, BiConsumer checkpointUpdater) { cancellableThreads.setOnCancel((reason, beforeCancelEx) -> { throw new CancellableThreads.ExecutionCancelledException("replication was canceled reason [" + reason + "]"); }); @@ -177,6 +178,8 @@ public void startReplication(ActionListener listener) { source.getCheckpointMetadata(getId(), checkpoint, checkpointInfoListener); checkpointInfoListener.whenComplete(checkpointInfo -> { + checkpointUpdater.accept(checkpointInfo.getCheckpoint(), this.indexShard); + final List filesToFetch = getFiles(checkpointInfo); state.setStage(SegmentReplicationState.Stage.GET_FILES); cancellableThreads.checkForCancel(); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index 8fee3f671ecc9..d57f35a5079fc 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -22,7 +22,6 @@ import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.CancellableThreads; -import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.transport.TransportResponse; @@ -49,7 +48,6 @@ import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.Map; import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; @@ -58,7 +56,6 @@ /** * Service class that handles incoming checkpoints to initiate replication events on replicas. 
- * * @opensearch.internal */ public class SegmentReplicationTargetService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { @@ -70,8 +67,6 @@ public class SegmentReplicationTargetService extends AbstractLifecycleComponent private final SegmentReplicationSourceFactory sourceFactory; - protected final Map latestReceivedCheckpoint = ConcurrentCollections.newConcurrentMap(); - private final IndicesService indicesService; private final ClusterService clusterService; private final TransportService transportService; @@ -216,7 +211,6 @@ public void clusterChanged(ClusterChangedEvent event) { public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null && indexShard.indexSettings().isSegRepEnabledOrRemoteNode()) { replicator.cancel(indexShard.shardId(), "Shard closing"); - latestReceivedCheckpoint.remove(shardId); } } @@ -227,6 +221,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh @Override public void afterIndexShardStarted(IndexShard indexShard) { if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() && indexShard.routingEntry().primary() == false) { + replicator.initializeStats(indexShard.shardId()); processLatestReceivedCheckpoint(indexShard, Thread.currentThread()); } } @@ -241,7 +236,6 @@ public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting ol && oldRouting.primary() == false && newRouting.primary()) { replicator.cancel(indexShard.shardId(), "Shard has been promoted to primary"); - latestReceivedCheckpoint.remove(indexShard.shardId()); } } @@ -468,7 +462,7 @@ private DiscoveryNode getPrimaryNode(ShardRouting primaryShard) { // visible to tests protected boolean processLatestReceivedCheckpoint(IndexShard replicaShard, Thread thread) { - final ReplicationCheckpoint latestPublishedCheckpoint = latestReceivedCheckpoint.get(replicaShard.shardId()); + final ReplicationCheckpoint latestPublishedCheckpoint = replicator.getPrimaryCheckpoint(replicaShard.shardId()); if (latestPublishedCheckpoint != null) { logger.trace( () -> new ParameterizedMessage( @@ -481,7 +475,7 @@ protected boolean processLatestReceivedCheckpoint(IndexShard replicaShard, Threa // if we retry ensure the shard is not in the process of being closed. // it will be removed from indexService's collection before the shard is actually marked as closed. if (indicesService.getShardOrNull(replicaShard.shardId()) != null) { - onNewCheckpoint(latestReceivedCheckpoint.get(replicaShard.shardId()), replicaShard); + onNewCheckpoint(replicator.getPrimaryCheckpoint(replicaShard.shardId()), replicaShard); } }; // Checks if we are using same thread and forks if necessary. 
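With this change the target service no longer keeps its own checkpoint map; both the read and the write side go through the replicator. A condensed sketch of the two SegmentReplicator entry points it now relies on (the full methods appear later in this patch; parameter names are shortened here):

    ReplicationCheckpoint getPrimaryCheckpoint(ShardId shardId) {
        // read side: the latest checkpoint received from the primary, or an empty placeholder
        return primaryCheckpoint.getOrDefault(shardId, ReplicationCheckpoint.empty(shardId));
    }

    public void updateReplicationCheckpointStats(ReplicationCheckpoint received, IndexShard shard) {
        // write side: record only checkpoints strictly ahead of the current one,
        // then refresh the per-version bytes-behind bookkeeping
        ReplicationCheckpoint current = primaryCheckpoint.get(shard.shardId());
        if (current == null || received.isAheadOf(current)) {
            primaryCheckpoint.put(shard.shardId(), received);
            calculateReplicationCheckpointStats(received, shard);
        }
    }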
@@ -497,13 +491,7 @@ protected boolean processLatestReceivedCheckpoint(IndexShard replicaShard, Threa // visible to tests protected void updateLatestReceivedCheckpoint(ReplicationCheckpoint receivedCheckpoint, IndexShard replicaShard) { - if (latestReceivedCheckpoint.get(replicaShard.shardId()) != null) { - if (receivedCheckpoint.isAheadOf(latestReceivedCheckpoint.get(replicaShard.shardId()))) { - latestReceivedCheckpoint.replace(replicaShard.shardId(), receivedCheckpoint); - } - } else { - latestReceivedCheckpoint.put(replicaShard.shardId(), receivedCheckpoint); - } + replicator.updateReplicationCheckpointStats(receivedCheckpoint, replicaShard); } /** diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java index ad3bc1933208c..b8a5774c21c1f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java @@ -19,8 +19,10 @@ import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationCollection; import org.opensearch.indices.replication.common.ReplicationFailedException; @@ -29,6 +31,10 @@ import java.io.IOException; import java.util.Map; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ConcurrentNavigableMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.TimeUnit; /** * This class is responsible for managing segment replication events on replicas. @@ -43,8 +49,11 @@ public class SegmentReplicator { private final ReplicationCollection<SegmentReplicationTarget> onGoingReplications; private final Map<ShardId, SegmentReplicationState> completedReplications = ConcurrentCollections.newConcurrentMap(); - private final ThreadPool threadPool; + private final ConcurrentMap<ShardId, ConcurrentNavigableMap<Long, ReplicationCheckpointStats>> replicationCheckpointStats = + ConcurrentCollections.newConcurrentMap(); + private final ConcurrentMap<ShardId, ReplicationCheckpoint> primaryCheckpoint = ConcurrentCollections.newConcurrentMap(); + private final ThreadPool threadPool; private final SetOnce<SegmentReplicationSourceFactory> sourceFactory; public SegmentReplicator(ThreadPool threadPool) { @@ -102,6 +111,135 @@ SegmentReplicationTarget startReplication( return target; } + /** + * Retrieves segment replication statistics for a specific shard. + * It is computed based on the first and last entries in the replicationCheckpointStats map. + * The last entry gives the bytes behind, and the difference between the first and last entries provides the lag.
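+ * The lag is reported only while bytes remain behind, measured from the timestamp of the oldest outstanding checkpoint entry.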
+ * + * @param shardId The shardId to get statistics for + * @return ReplicationStats containing bytes behind and replication lag information + */ + public ReplicationStats getSegmentReplicationStats(final ShardId shardId) { + final ConcurrentNavigableMap existingCheckpointStats = replicationCheckpointStats.get(shardId); + if (existingCheckpointStats == null || existingCheckpointStats.isEmpty()) { + return ReplicationStats.empty(); + } + + Map.Entry lowestEntry = existingCheckpointStats.firstEntry(); + Map.Entry highestEntry = existingCheckpointStats.lastEntry(); + + long bytesBehind = highestEntry.getValue().getBytesBehind(); + long replicationLag = bytesBehind > 0L + ? TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lowestEntry.getValue().getTimestamp()) + : 0; + + return new ReplicationStats(bytesBehind, bytesBehind, replicationLag); + } + + /** + * Updates the latest checkpoint of the primary for the replica shard and then + * calculates checkpoint statistics for the replica shard with the latest checkpoint information. + * This method maintains statistics about how far behind replica shards are from the primary. + * It calculates the bytes behind by comparing the latest-received and current checkpoint in the indexShard, + * and it maintains the bytes behind and timestamp for each segmentInfosVersion of latestCheckPoint. + *

+     * <pre>
+     * Example:
+     * {
+     *     [replica][0] : {
+     *                       7 : {bytesBehind=0, timestamp=1700220000000}
+     *                       8 : {bytesBehind=100, timestamp=1700330000000}
+     *                       9 : {bytesBehind=150, timestamp=1700440000000}
+     *                    }
+     * }
+     * </pre>
+ * @param latestReceivedCheckPoint The most recent checkpoint from the primary + * @param indexShard The index shard where its updated + */ + public void updateReplicationCheckpointStats(final ReplicationCheckpoint latestReceivedCheckPoint, final IndexShard indexShard) { + ReplicationCheckpoint primaryCheckPoint = this.primaryCheckpoint.get(indexShard.shardId()); + if (primaryCheckPoint == null || latestReceivedCheckPoint.isAheadOf(primaryCheckPoint)) { + this.primaryCheckpoint.put(indexShard.shardId(), latestReceivedCheckPoint); + calculateReplicationCheckpointStats(latestReceivedCheckPoint, indexShard); + } + } + + /** + * Removes checkpoint statistics for all checkpoints up to and including the last successful sync + * and recalculates the bytes behind value for the last replicationCheckpointStats entry. + * This helps maintain only relevant checkpoint information and clean up old data. + * + * @param indexShard The index shard to prune checkpoints for + */ + protected void pruneCheckpointsUpToLastSync(final IndexShard indexShard) { + ReplicationCheckpoint latestCheckpoint = this.primaryCheckpoint.get(indexShard.shardId()); + if (latestCheckpoint != null) { + ReplicationCheckpoint indexReplicationCheckPoint = indexShard.getLatestReplicationCheckpoint(); + long segmentInfoVersion = indexReplicationCheckPoint.getSegmentInfosVersion(); + final ConcurrentNavigableMap existingCheckpointStats = replicationCheckpointStats.get( + indexShard.shardId() + ); + + if (existingCheckpointStats != null && !existingCheckpointStats.isEmpty()) { + existingCheckpointStats.keySet().removeIf(key -> key < segmentInfoVersion); + Map.Entry lastEntry = existingCheckpointStats.lastEntry(); + if (lastEntry != null) { + lastEntry.getValue().setBytesBehind(calculateBytesBehind(latestCheckpoint, indexReplicationCheckPoint)); + } + } + } + } + + private void calculateReplicationCheckpointStats(final ReplicationCheckpoint latestReceivedCheckPoint, final IndexShard indexShard) { + ReplicationCheckpoint indexShardReplicationCheckpoint = indexShard.getLatestReplicationCheckpoint(); + if (indexShardReplicationCheckpoint != null) { + long segmentInfosVersion = latestReceivedCheckPoint.getSegmentInfosVersion(); + long bytesBehind = calculateBytesBehind(latestReceivedCheckPoint, indexShardReplicationCheckpoint); + if (bytesBehind > 0) { + ConcurrentNavigableMap existingCheckpointStats = replicationCheckpointStats.get( + indexShard.shardId() + ); + if (existingCheckpointStats != null) { + existingCheckpointStats.computeIfAbsent( + segmentInfosVersion, + k -> new ReplicationCheckpointStats(bytesBehind, latestReceivedCheckPoint.getCreatedTimeStamp()) + ); + } + } + } + } + + private long calculateBytesBehind(final ReplicationCheckpoint latestCheckPoint, final ReplicationCheckpoint replicationCheckpoint) { + Store.RecoveryDiff diff = Store.segmentReplicationDiff(latestCheckPoint.getMetadataMap(), replicationCheckpoint.getMetadataMap()); + + return diff.missing.stream().mapToLong(StoreFileMetadata::length).sum(); + } + + public void initializeStats(ShardId shardId) { + replicationCheckpointStats.computeIfAbsent(shardId, k -> new ConcurrentSkipListMap<>()); + } + + private static class ReplicationCheckpointStats { + private long bytesBehind; + private final long timestamp; + + public ReplicationCheckpointStats(long bytesBehind, long timestamp) { + this.bytesBehind = bytesBehind; + this.timestamp = timestamp; + } + + public long getBytesBehind() { + return bytesBehind; + } + + public void setBytesBehind(long bytesBehind) { + 
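// bytesBehind is deliberately mutable: pruneCheckpointsUpToLastSync recomputes the newest entry's remaining bytes after each successful sync.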
this.bytesBehind = bytesBehind; + } + + public long getTimestamp() { + return timestamp; + } + } + /** * Runnable implementation to trigger a replication event. */ @@ -138,6 +276,7 @@ private void start(final long replicationId) { @Override public void onResponse(Void o) { logger.debug(() -> new ParameterizedMessage("Finished replicating {} marking as done.", target.description())); + pruneCheckpointsUpToLastSync(target.indexShard()); onGoingReplications.markAsDone(replicationId); if (target.state().getIndex().recoveredFileCount() != 0 && target.state().getIndex().recoveredBytes() != 0) { completedReplications.put(target.shardId(), target.state()); @@ -153,7 +292,7 @@ public void onFailure(Exception e) { } onGoingReplications.fail(replicationId, new ReplicationFailedException("Segment Replication failed", e), false); } - }); + }, this::updateReplicationCheckpointStats); } // pkg-private for integration tests @@ -197,12 +336,18 @@ int size() { void cancel(ShardId shardId, String reason) { onGoingReplications.cancelForShard(shardId, reason); + replicationCheckpointStats.remove(shardId); + primaryCheckpoint.remove(shardId); } SegmentReplicationTarget get(ShardId shardId) { return onGoingReplications.getOngoingReplicationTarget(shardId); } + ReplicationCheckpoint getPrimaryCheckpoint(ShardId shardId) { + return primaryCheckpoint.getOrDefault(shardId, ReplicationCheckpoint.empty(shardId)); + } + ReplicationCollection.ReplicationRef get(long id) { return onGoingReplications.get(id); } diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java index 29410159a4955..8380187a288ba 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -38,6 +38,7 @@ public class ReplicationCheckpoint implements Writeable, Comparable metadataMap; + private final long createdTimeStamp; public static ReplicationCheckpoint empty(ShardId shardId) { return empty(shardId, ""); @@ -55,10 +56,11 @@ private ReplicationCheckpoint(ShardId shardId, String codec) { length = 0L; this.codec = codec; this.metadataMap = Collections.emptyMap(); + this.createdTimeStamp = System.nanoTime(); } public ReplicationCheckpoint(ShardId shardId, long primaryTerm, long segmentsGen, long segmentInfosVersion, String codec) { - this(shardId, primaryTerm, segmentsGen, segmentInfosVersion, 0L, codec, Collections.emptyMap()); + this(shardId, primaryTerm, segmentsGen, segmentInfosVersion, 0L, codec, Collections.emptyMap(), System.nanoTime()); } public ReplicationCheckpoint( @@ -77,6 +79,27 @@ public ReplicationCheckpoint( this.length = length; this.codec = codec; this.metadataMap = metadataMap; + this.createdTimeStamp = System.nanoTime(); + } + + public ReplicationCheckpoint( + ShardId shardId, + long primaryTerm, + long segmentsGen, + long segmentInfosVersion, + long length, + String codec, + Map metadataMap, + long createdTimeStamp + ) { + this.shardId = shardId; + this.primaryTerm = primaryTerm; + this.segmentsGen = segmentsGen; + this.segmentInfosVersion = segmentInfosVersion; + this.length = length; + this.codec = codec; + this.metadataMap = metadataMap; + this.createdTimeStamp = createdTimeStamp; } public ReplicationCheckpoint(StreamInput in) throws IOException { @@ -96,6 +119,11 @@ public ReplicationCheckpoint(StreamInput in) throws IOException { } 
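// Wire-format note: optional fields are version-gated on both the read and write paths (metadataMap since 2.10, createdTimeStamp below since 3.0); streams from older nodes fall back to the defaults.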
else { this.metadataMap = Collections.emptyMap(); } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + this.createdTimeStamp = in.readLong(); + } else { + this.createdTimeStamp = 0; + } } /** @@ -159,6 +187,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_10_0)) { out.writeMap(metadataMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeLong(createdTimeStamp); + } } @Override @@ -197,6 +228,10 @@ public Map getMetadataMap() { return metadataMap; } + public long getCreatedTimeStamp() { + return createdTimeStamp; + } + @Override public String toString() { return "ReplicationCheckpoint{" @@ -212,6 +247,8 @@ public String toString() { + length + ", codec=" + codec + + ", timestamp=" + + createdTimeStamp + '}'; } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index e1e5e4a3b455e..222c6e8ba36c4 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -957,7 +957,8 @@ protected Node( remoteStoreSettings, fileCache, compositeIndexSettings, - segmentReplicator::startReplication + segmentReplicator::startReplication, + segmentReplicator::getSegmentReplicationStats ); final IngestService ingestService = new IngestService( diff --git a/server/src/test/java/org/opensearch/common/io/VersionedCodecStreamWrapperTests.java b/server/src/test/java/org/opensearch/common/io/VersionedCodecStreamWrapperTests.java index 938337fc5146e..a88df528bcb86 100644 --- a/server/src/test/java/org/opensearch/common/io/VersionedCodecStreamWrapperTests.java +++ b/server/src/test/java/org/opensearch/common/io/VersionedCodecStreamWrapperTests.java @@ -38,16 +38,19 @@ public class VersionedCodecStreamWrapperTests extends OpenSearchTestCase { private static final int VERSION = 1; IndexIOStreamHandler ioStreamHandler; + IndexIOStreamHandlerFactory ioStreamHandlerFactory; VersionedCodecStreamWrapper versionedCodecStreamWrapper; @Before public void setup() throws IOException { + ioStreamHandlerFactory = mock(IndexIOStreamHandlerFactory.class); ioStreamHandler = mock(IndexIOStreamHandler.class); - versionedCodecStreamWrapper = new VersionedCodecStreamWrapper(ioStreamHandler, VERSION, CODEC); + versionedCodecStreamWrapper = new VersionedCodecStreamWrapper(ioStreamHandlerFactory, VERSION, VERSION, CODEC); } public void testReadStream() throws IOException { DummyObject expectedObject = new DummyObject("test read"); + when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); when(ioStreamHandler.readContent(any())).thenReturn(expectedObject); DummyObject readData = versionedCodecStreamWrapper.readStream(createHeaderFooterBytes(CODEC, VERSION, true, true)); assertEquals(readData, expectedObject); @@ -55,6 +58,7 @@ public void testReadStream() throws IOException { public void testReadWithOldVersionThrowsException() throws IOException { DummyObject expectedObject = new DummyObject("test read"); + when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); when(ioStreamHandler.readContent(any())).thenReturn(expectedObject); assertThrows( IndexFormatTooOldException.class, @@ -64,6 +68,7 @@ public void testReadWithOldVersionThrowsException() throws IOException { public void testReadWithNewVersionThrowsException() throws IOException { DummyObject expectedObject = new DummyObject("test read"); + 
when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); when(ioStreamHandler.readContent(any())).thenReturn(expectedObject); assertThrows( IndexFormatTooNewException.class, @@ -73,6 +78,7 @@ public void testReadWithNewVersionThrowsException() throws IOException { public void testReadWithUnexpectedCodecThrowsException() throws IOException { DummyObject expectedObject = new DummyObject("test read"); + when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); when(ioStreamHandler.readContent(any())).thenReturn(expectedObject); assertThrows( CorruptIndexException.class, @@ -82,6 +88,7 @@ public void testReadWithUnexpectedCodecThrowsException() throws IOException { public void testReadWithNoHeaderThrowsException() throws IOException { DummyObject expectedObject = new DummyObject("test read"); + when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); when(ioStreamHandler.readContent(any())).thenReturn(expectedObject); assertThrows( CorruptIndexException.class, @@ -91,6 +98,7 @@ public void testReadWithNoHeaderThrowsException() throws IOException { public void testReadWithNoFooterThrowsException() throws IOException { DummyObject expectedObject = new DummyObject("test read"); + when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); when(ioStreamHandler.readContent(any())).thenReturn(expectedObject); assertThrows( CorruptIndexException.class, @@ -102,6 +110,7 @@ public void testWriteStream() throws IOException { DummyObject expectedObject = new DummyObject("test read"); BytesStreamOutput output = new BytesStreamOutput(); OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("dummy bytes", "dummy stream", output, 4096); + when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); doAnswer(invocation -> { IndexOutput io = invocation.getArgument(0); io.writeString("test write"); diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index bd86d3d396987..90f2b0b21cc8a 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -265,7 +265,8 @@ private IndexService newIndexService(IndexModule module) throws IOException { () -> IndexSettings.DEFAULT_REFRESH_INTERVAL, DefaultRecoverySettings.INSTANCE, DefaultRemoteStoreSettings.INSTANCE, - s -> {} + s -> {}, + null ); } diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 233a99cbe4a73..899e80965e4fd 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -1844,7 +1844,8 @@ public void testSegmentReplicationCheckpointTracking() { 1, 1L, Codec.getDefault().getName(), - Map.of("segment_1", segment_1) + Map.of("segment_1", segment_1), + 0L ); final ReplicationCheckpoint secondCheckpoint = new ReplicationCheckpoint( tracker.shardId(), @@ -1853,7 +1854,8 @@ public void testSegmentReplicationCheckpointTracking() { 2, 51L, Codec.getDefault().getName(), - Map.of("segment_1", segment_1, "segment_2", segment_2) + Map.of("segment_1", segment_1, "segment_2", segment_2), + 0L ); final ReplicationCheckpoint thirdCheckpoint = new ReplicationCheckpoint( tracker.shardId(), @@ -1862,7 +1864,8 @@ public void 
testSegmentReplicationCheckpointTracking() { 3, 151L, Codec.getDefault().getName(), - Map.of("segment_1", segment_1, "segment_2", segment_2, "segment_3", segment_3) + Map.of("segment_1", segment_1, "segment_2", segment_2, "segment_3", segment_3), + 0L ); tracker.setLatestReplicationCheckpoint(initialCheckpoint); @@ -1974,7 +1977,8 @@ public void testSegmentReplicationCheckpointForRelocatingPrimary() { 1, 5L, Codec.getDefault().getName(), - Map.of("segment_1", segment_1) + Map.of("segment_1", segment_1), + 0L ); tracker.setLatestReplicationCheckpoint(initialCheckpoint); tracker.startReplicationLagTimers(initialCheckpoint); @@ -2033,7 +2037,8 @@ public void testSegmentReplicationCheckpointTrackingInvalidAllocationIDs() { 1, 1L, Codec.getDefault().getName(), - Collections.emptyMap() + Collections.emptyMap(), + 0L ); tracker.setLatestReplicationCheckpoint(initialCheckpoint); tracker.startReplicationLagTimers(initialCheckpoint); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index df3df81361a12..d673eb49be581 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -37,7 +37,7 @@ import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; -import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandlerFactory; import org.opensearch.test.MockLogAppender; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.threadpool.ThreadPool; @@ -696,7 +696,8 @@ public void testUploadMetadataNonEmpty() throws IOException { eq(IOContext.DEFAULT) ); VersionedCodecStreamWrapper streamWrapper = new VersionedCodecStreamWrapper<>( - new RemoteSegmentMetadataHandler(), + new RemoteSegmentMetadataHandlerFactory(), + RemoteSegmentMetadata.CURRENT_VERSION, RemoteSegmentMetadata.CURRENT_VERSION, RemoteSegmentMetadata.METADATA_CODEC ); @@ -840,7 +841,7 @@ public void testHeaderMaxVersionCorruptIndexException() throws IOException { BytesStreamOutput output = new BytesStreamOutput(); OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("segment metadata", "metadata output stream", output, 4096); - CodecUtil.writeHeader(indexOutput, RemoteSegmentMetadata.METADATA_CODEC, 2); + CodecUtil.writeHeader(indexOutput, RemoteSegmentMetadata.METADATA_CODEC, 3); indexOutput.writeMapOfStrings(metadata); CodecUtil.writeFooter(indexOutput); indexOutput.close(); @@ -1115,7 +1116,7 @@ public void testSegmentMetadataCurrentVersion() { If author doesn't want to support old metadata files. Then this can be ignored. 
After taking appropriate action, fix this test by setting the correct version here */ - assertEquals(RemoteSegmentMetadata.CURRENT_VERSION, 1); + assertEquals(RemoteSegmentMetadata.CURRENT_VERSION, 2); } private void indexDocs(int startDocId, int numberOfDocs) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerFactoryTests.java b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerFactoryTests.java new file mode 100644 index 0000000000000..6911b84c58e4d --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerFactoryTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store.remote.metadata; + +import org.opensearch.common.io.IndexIOStreamHandler; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +/** + * Unit tests for {@link org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandlerFactoryTests}. + */ +public class RemoteSegmentMetadataHandlerFactoryTests extends OpenSearchTestCase { + + private RemoteSegmentMetadataHandlerFactory segmentMetadataHandlerFactory; + + @Before + public void setup() { + segmentMetadataHandlerFactory = new RemoteSegmentMetadataHandlerFactory(); + } + + public void testGetHandlerReturnsBasedOnVersion() { + IndexIOStreamHandler versionOneHandler = segmentMetadataHandlerFactory.getHandler(1); + assertTrue(versionOneHandler instanceof RemoteSegmentMetadataHandler); + IndexIOStreamHandler versionTwoHandler = segmentMetadataHandlerFactory.getHandler(2); + assertTrue(versionTwoHandler instanceof RemoteSegmentMetadataHandler); + } + + public void testGetHandlerWhenCalledMultipleTimesReturnsCachedHandler() { + IndexIOStreamHandler versionTwoHandlerOne = segmentMetadataHandlerFactory.getHandler(2); + IndexIOStreamHandler versionTwoHandlerTwo = segmentMetadataHandlerFactory.getHandler(2); + assertEquals(versionTwoHandlerOne, versionTwoHandlerTwo); + } + + public void testGetHandlerWhenHandlerNotProvidedThrowsException() { + Throwable throwable = assertThrows(IllegalArgumentException.class, () -> { segmentMetadataHandlerFactory.getHandler(3); }); + assertEquals("Unsupported RemoteSegmentMetadata version: 3", throwable.getMessage()); + } +} diff --git a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java index f5d54dc790e76..0a668bba28c74 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java @@ -44,7 +44,7 @@ public class RemoteSegmentMetadataHandlerTests extends IndexShardTestCase { @Before public void setup() throws IOException { - remoteSegmentMetadataHandler = new RemoteSegmentMetadataHandler(); + remoteSegmentMetadataHandler = new RemoteSegmentMetadataHandler(2); Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactoryTests.java 
b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactoryTests.java new file mode 100644 index 0000000000000..767037160980e --- /dev/null +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactoryTests.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog.transfer; + +import org.opensearch.common.io.IndexIOStreamHandler; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +/** + * Unit tests for {@link org.opensearch.index.translog.transfer.TranslogTransferMetadataHandlerFactoryTests}. + */ +public class TranslogTransferMetadataHandlerFactoryTests extends OpenSearchTestCase { + + private TranslogTransferMetadataHandlerFactory translogTransferMetadataHandlerFactory; + + @Before + public void setup() { + translogTransferMetadataHandlerFactory = new TranslogTransferMetadataHandlerFactory(); + } + + public void testGetHandlerReturnsBasedOnVersion() { + IndexIOStreamHandler versionOneHandler = translogTransferMetadataHandlerFactory.getHandler(1); + assertTrue(versionOneHandler instanceof TranslogTransferMetadataHandler); + } + + public void testGetHandlerWhenCalledMultipleTimesReturnsCachedHandler() { + IndexIOStreamHandler versionTwoHandlerOne = translogTransferMetadataHandlerFactory.getHandler(1); + IndexIOStreamHandler versionTwoHandlerTwo = translogTransferMetadataHandlerFactory.getHandler(1); + assertEquals(versionTwoHandlerOne, versionTwoHandlerTwo); + } + + public void testGetHandlerWhenHandlerNotProvidedThrowsException() { + Throwable throwable = assertThrows(IllegalArgumentException.class, () -> { translogTransferMetadataHandlerFactory.getHandler(2); }); + assertEquals("Unsupported TranslogTransferMetadata version: 2", throwable.getMessage()); + } +} diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 1faaa16ce5628..8a47b87b09f30 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -100,8 +100,6 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { private SegmentReplicationState state; private ReplicationCheckpoint initialCheckpoint; - private ClusterState clusterState; - private static final long TRANSPORT_TIMEOUT = 30000;// 30sec @Override @@ -140,13 +138,14 @@ public void setUp() throws Exception { indicesService = mock(IndicesService.class); ClusterService clusterService = mock(ClusterService.class); - clusterState = mock(ClusterState.class); + ClusterState clusterState = mock(ClusterState.class); RoutingTable mockRoutingTable = mock(RoutingTable.class); when(clusterService.state()).thenReturn(clusterState); when(clusterState.routingTable()).thenReturn(mockRoutingTable); when(mockRoutingTable.shardRoutingTable(any())).thenReturn(primaryShard.getReplicationGroup().getRoutingTable()); when(clusterState.nodes()).thenReturn(DiscoveryNodes.builder().add(localNode).build()); + sut = prepareForReplication(primaryShard, replicaShard, transportService, indicesService, clusterService); initialCheckpoint = 
primaryShard.getLatestReplicationCheckpoint(); aheadCheckpoint = new ReplicationCheckpoint( @@ -596,13 +595,6 @@ public void testShardRoutingChanged_DoesNothingForDocRepIndex() throws IOExcepti closeShards(shard); } - public void testUpdateLatestReceivedCheckpoint() { - final SegmentReplicationTargetService spy = spy(sut); - sut.updateLatestReceivedCheckpoint(checkpoint, replicaShard); - sut.updateLatestReceivedCheckpoint(aheadCheckpoint, replicaShard); - assertEquals(sut.latestReceivedCheckpoint.get(replicaShard.shardId()), aheadCheckpoint); - } - public void testForceSegmentSyncHandler() throws Exception { ForceSyncRequest forceSyncRequest = new ForceSyncRequest(1L, 1L, replicaShard.shardId()); when(indicesService.getShardOrNull(forceSyncRequest.getShardId())).thenReturn(replicaShard); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 8b4b3aff701b4..52cb39bebd2b7 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -177,6 +177,9 @@ public void onFailure(Exception e) { logger.error("Unexpected onFailure", e); Assert.fail(); } + }, (ReplicationCheckpoint checkpoint, IndexShard indexShard) -> { + assertEquals(repCheckpoint, checkpoint); + assertEquals(indexShard, spyIndexShard); }); } @@ -230,7 +233,7 @@ public void onFailure(Exception e) { assertEquals(exception, e.getCause().getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } - }); + }, mock(BiConsumer.class)); } public void testFailureResponse_getSegmentFiles() { @@ -283,7 +286,7 @@ public void onFailure(Exception e) { assertEquals(exception, e.getCause().getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } - }); + }, mock(BiConsumer.class)); } public void testFailure_finalizeReplication_NonCorruptionException() throws IOException { @@ -330,7 +333,7 @@ public void onFailure(Exception e) { assertEquals(exception, e.getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } - }); + }, mock(BiConsumer.class)); } public void testFailure_finalizeReplication_IndexFormatException() throws IOException { @@ -376,7 +379,7 @@ public void onFailure(Exception e) { assertEquals(exception, e.getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } - }); + }, mock(BiConsumer.class)); } public void testFailure_differentSegmentFiles() throws IOException { @@ -429,7 +432,7 @@ public void onFailure(Exception e) { assertTrue(e.getMessage().contains("has local copies of segments that differ from the primary")); segrepTarget.fail(new ReplicationFailedException(e), false); } - }); + }, mock(BiConsumer.class)); } /** @@ -483,7 +486,7 @@ public void onFailure(Exception e) { logger.error("Unexpected onFailure", e); Assert.fail(); } - }); + }, mock(BiConsumer.class)); } /** diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java index 81ea16c80dd79..38f1c59bd5b68 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java @@ -9,6 +9,8 @@ package org.opensearch.indices.replication; import org.apache.lucene.store.IOContext; +import 
org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.Version; import org.opensearch.OpenSearchCorruptionException; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -20,6 +22,8 @@ import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.replication.TestReplicationSource; import org.opensearch.index.shard.IndexShard; @@ -35,9 +39,11 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; @@ -200,6 +206,173 @@ public void getSegmentFiles( closeShards(primary, replica); } + public void testGetSegmentReplicationStats_WhenNoReplication() { + SegmentReplicator segmentReplicator = new SegmentReplicator(threadPool); + ShardId shardId = new ShardId("index", "uuid", 0); + ReplicationStats replicationStats = segmentReplicator.getSegmentReplicationStats(shardId); + assertEquals(0, replicationStats.maxReplicationLag); + assertEquals(0, replicationStats.totalBytesBehind); + assertEquals(0, replicationStats.maxBytesBehind); + } + + public void testGetSegmentReplicationStats_WhileOnGoingReplicationAndPrimaryRefreshedToNewCheckPoint() { + ShardId shardId = new ShardId("index", "uuid", 0); + ReplicationCheckpoint firstReplicationCheckpoint = ReplicationCheckpoint.empty(shardId); + + StoreFileMetadata storeFileMetadata1 = new StoreFileMetadata("test-1", 500, "1", Version.LATEST, new BytesRef(500)); + StoreFileMetadata storeFileMetadata2 = new StoreFileMetadata("test-2", 500, "1", Version.LATEST, new BytesRef(500)); + Map stringStoreFileMetadataMapOne = new HashMap<>(); + stringStoreFileMetadataMapOne.put("test-1", storeFileMetadata1); + stringStoreFileMetadataMapOne.put("test-2", storeFileMetadata2); + ReplicationCheckpoint secondReplicationCheckpoint = new ReplicationCheckpoint( + shardId, + 2, + 2, + 2, + 1000, + "", + stringStoreFileMetadataMapOne, + System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + ); + + IndexShard replicaShard = mock(IndexShard.class); + when(replicaShard.shardId()).thenReturn(shardId); + when(replicaShard.getLatestReplicationCheckpoint()).thenReturn(firstReplicationCheckpoint) + .thenReturn(firstReplicationCheckpoint) + .thenReturn(firstReplicationCheckpoint) + .thenReturn(secondReplicationCheckpoint); + + SegmentReplicator segmentReplicator = new SegmentReplicator(threadPool); + segmentReplicator.initializeStats(shardId); + segmentReplicator.updateReplicationCheckpointStats(firstReplicationCheckpoint, replicaShard); + segmentReplicator.updateReplicationCheckpointStats(secondReplicationCheckpoint, replicaShard); + + Map stringStoreFileMetadataMapTwo = new HashMap<>(); + StoreFileMetadata storeFileMetadata3 = new StoreFileMetadata("test-3", 200, "1", Version.LATEST, new BytesRef(200)); + stringStoreFileMetadataMapTwo.put("test-1", storeFileMetadata1); + stringStoreFileMetadataMapTwo.put("test-2", storeFileMetadata2); + stringStoreFileMetadataMapTwo.put("test-3", storeFileMetadata3); + ReplicationCheckpoint thirdReplicationCheckpoint = new ReplicationCheckpoint( + shardId, + 3, + 
3, + 3, + 200, + "", + stringStoreFileMetadataMapTwo, + System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + ); + + segmentReplicator.updateReplicationCheckpointStats(thirdReplicationCheckpoint, replicaShard); + + ReplicationStats replicationStatsFirst = segmentReplicator.getSegmentReplicationStats(shardId); + assertEquals(1200, replicationStatsFirst.totalBytesBehind); + assertEquals(1200, replicationStatsFirst.maxBytesBehind); + assertTrue(replicationStatsFirst.maxReplicationLag > 0); + + segmentReplicator.pruneCheckpointsUpToLastSync(replicaShard); + + ReplicationStats replicationStatsSecond = segmentReplicator.getSegmentReplicationStats(shardId); + assertEquals(200, replicationStatsSecond.totalBytesBehind); + assertEquals(200, replicationStatsSecond.maxBytesBehind); + assertTrue(replicationStatsSecond.maxReplicationLag > 0); + } + + public void testGetSegmentReplicationStats_WhenCheckPointReceivedOutOfOrder() { + ShardId shardId = new ShardId("index", "uuid", 0); + ReplicationCheckpoint firstReplicationCheckpoint = ReplicationCheckpoint.empty(shardId); + + StoreFileMetadata storeFileMetadata1 = new StoreFileMetadata("test-1", 500, "1", Version.LATEST, new BytesRef(500)); + StoreFileMetadata storeFileMetadata2 = new StoreFileMetadata("test-2", 500, "1", Version.LATEST, new BytesRef(500)); + Map stringStoreFileMetadataMapOne = new HashMap<>(); + stringStoreFileMetadataMapOne.put("test-1", storeFileMetadata1); + stringStoreFileMetadataMapOne.put("test-2", storeFileMetadata2); + ReplicationCheckpoint secondReplicationCheckpoint = new ReplicationCheckpoint( + shardId, + 2, + 2, + 2, + 1000, + "", + stringStoreFileMetadataMapOne, + System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + ); + + IndexShard replicaShard = mock(IndexShard.class); + when(replicaShard.shardId()).thenReturn(shardId); + when(replicaShard.getLatestReplicationCheckpoint()).thenReturn(firstReplicationCheckpoint) + .thenReturn(firstReplicationCheckpoint) + .thenReturn(firstReplicationCheckpoint); + + SegmentReplicator segmentReplicator = new SegmentReplicator(threadPool); + segmentReplicator.initializeStats(shardId); + segmentReplicator.updateReplicationCheckpointStats(firstReplicationCheckpoint, replicaShard); + + Map stringStoreFileMetadataMapTwo = new HashMap<>(); + StoreFileMetadata storeFileMetadata3 = new StoreFileMetadata("test-3", 200, "1", Version.LATEST, new BytesRef(200)); + stringStoreFileMetadataMapTwo.put("test-1", storeFileMetadata1); + stringStoreFileMetadataMapTwo.put("test-2", storeFileMetadata2); + stringStoreFileMetadataMapTwo.put("test-3", storeFileMetadata3); + ReplicationCheckpoint thirdReplicationCheckpoint = new ReplicationCheckpoint( + shardId, + 3, + 3, + 3, + 200, + "", + stringStoreFileMetadataMapTwo, + System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + ); + + segmentReplicator.updateReplicationCheckpointStats(thirdReplicationCheckpoint, replicaShard); + + ReplicationStats replicationStatsFirst = segmentReplicator.getSegmentReplicationStats(shardId); + assertEquals(1200, replicationStatsFirst.totalBytesBehind); + assertEquals(1200, replicationStatsFirst.maxBytesBehind); + assertTrue(replicationStatsFirst.maxReplicationLag > 0); + + segmentReplicator.updateReplicationCheckpointStats(secondReplicationCheckpoint, replicaShard); + ReplicationStats replicationStatsSecond = segmentReplicator.getSegmentReplicationStats(shardId); + assertEquals(1200, replicationStatsSecond.totalBytesBehind); + assertEquals(1200, replicationStatsSecond.maxBytesBehind); + assertTrue(replicationStatsSecond.maxReplicationLag > 0); 
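// Why 1200 in these assertions: bytesBehind is the summed length of files the replica's checkpoint lacks relative to the newest primary checkpoint (Store.segmentReplicationDiff): test-1 (500) + test-2 (500) + test-3 (200). The older checkpoint arriving out of order is ignored, so the value does not regress.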
+ } + + public void testUpdateReplicationCheckpointStatsIgnoresWhenOutOfOrderCheckPointReceived() { + ShardId shardId = new ShardId("index", "uuid", 0); + IndexShard replicaShard = mock(IndexShard.class); + when(replicaShard.shardId()).thenReturn(shardId); + + SegmentReplicator segmentReplicator = new SegmentReplicator(threadPool); + ReplicationCheckpoint replicationCheckpoint = new ReplicationCheckpoint( + shardId, + 2, + 2, + 2, + 1000, + "", + new HashMap<>(), + System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + ); + segmentReplicator.updateReplicationCheckpointStats(replicationCheckpoint, replicaShard); + + assertEquals(replicationCheckpoint, segmentReplicator.getPrimaryCheckpoint(shardId)); + + ReplicationCheckpoint oldReplicationCheckpoint = new ReplicationCheckpoint( + shardId, + 1, + 1, + 1, + 500, + "", + new HashMap<>(), + System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + ); + segmentReplicator.updateReplicationCheckpointStats(oldReplicationCheckpoint, replicaShard); + + assertEquals(replicationCheckpoint, segmentReplicator.getPrimaryCheckpoint(shardId)); + } + protected void resolveCheckpointListener(ActionListener listener, IndexShard primary) { try (final CopyState copyState = new CopyState(primary)) { listener.onResponse( @@ -209,5 +382,4 @@ protected void resolveCheckpointListener(ActionListener throw new UncheckedIOException(e); } } - } diff --git a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java index 0b30486038e3a..3b7c5560f89fb 100644 --- a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java @@ -81,7 +81,8 @@ public static IndexShard createMockIndexShard() throws IOException { 0L, 0L, Codec.getDefault().getName(), - SI_SNAPSHOT.asMap() + SI_SNAPSHOT.asMap(), + 0L ); final Tuple, ReplicationCheckpoint> gatedCloseableReplicationCheckpointTuple = new Tuple<>( new GatedCloseable<>(testSegmentInfos, () -> {}), diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 655a9eb7d5d38..bdd4b40e398d5 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -86,6 +86,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.MapperTestUtils; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.VersionType; import org.opensearch.index.cache.IndexCache; import org.opensearch.index.cache.query.DisabledQueryCache; @@ -688,6 +689,9 @@ protected IndexShard newShard( } return new InternalTranslogFactory(); }; + // This is fine since we are not testing the node stats now + Function mockReplicationStatsProvider = mock(Function.class); + when(mockReplicationStatsProvider.apply(any())).thenReturn(new ReplicationStats(800, 800, 500)); indexShard = new IndexShard( routing, indexSettings, @@ -717,7 +721,8 @@ protected IndexShard newShard( DefaultRecoverySettings.INSTANCE, DefaultRemoteStoreSettings.INSTANCE, false, - discoveryNodes + discoveryNodes, + mockReplicationStatsProvider ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); if (remoteStoreStatsTrackerFactory != null) { From 
415abb91dbb39245690faf349b2cf71e6f65dca0 Mon Sep 17 00:00:00 2001 From: Varun Bharadwaj Date: Thu, 27 Feb 2025 14:45:19 -0800 Subject: [PATCH 28/48] [Pull-based Ingestion] Support segment replication for pull-based ingestion (#17359) --- .../plugin/kafka/IngestFromKafkaIT.java | 212 ++--- .../plugin/kafka/KafkaIngestionBaseIT.java | 111 +++ .../plugin/kafka/RemoteStoreKafkaIT.java | 125 +++ ...ava => TestContainerThreadLeakFilter.java} | 7 +- .../index/engine/IngestionEngine.java | 843 ++---------------- .../index/engine/InternalEngine.java | 74 +- .../translog/InternalTranslogManager.java | 7 +- .../index/translog/NoOpTranslogManager.java | 60 ++ .../opensearch/index/translog/Translog.java | 2 + .../index/translog/TranslogManager.java | 45 +- .../pollingingest/IngestionEngineFactory.java | 5 + .../index/engine/IngestionEngineTests.java | 23 +- .../opensearch/test/InternalTestCluster.java | 5 +- 13 files changed, 548 insertions(+), 971 deletions(-) create mode 100644 plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java create mode 100644 plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java rename plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/{TestContainerWatchdogThreadLeakFilter.java => TestContainerThreadLeakFilter.java} (76%) diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java index d6b099c6b24d8..d51569431506a 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java @@ -8,12 +8,6 @@ package org.opensearch.plugin.kafka; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; - -import org.apache.kafka.clients.producer.KafkaProducer; -import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.common.serialization.StringSerializer; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; @@ -22,40 +16,24 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.index.query.RangeQueryBuilder; -import org.opensearch.plugins.Plugin; import org.opensearch.plugins.PluginInfo; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Assert; -import java.util.Arrays; -import java.util.Collection; import java.util.List; -import java.util.Properties; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; -import org.testcontainers.containers.KafkaContainer; -import org.testcontainers.utility.DockerImageName; - import static org.hamcrest.Matchers.is; import static org.awaitility.Awaitility.await; /** * Integration test for Kafka ingestion */ -@ThreadLeakFilters(filters = TestContainerWatchdogThreadLeakFilter.class) -public class IngestFromKafkaIT extends OpenSearchIntegTestCase { - static final String topicName = "test"; - - private KafkaContainer kafka; - - @Override - protected Collection> nodePlugins() { - return 
Arrays.asList(KafkaPlugin.class); - } - +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class IngestFromKafkaIT extends KafkaIngestionBaseIT { /** * test ingestion-kafka-plugin is installed */ @@ -75,128 +53,86 @@ public void testPluginsAreInstalled() { } public void testKafkaIngestion() { - try { - setupKafka(); - // create an index with ingestion source from kafka - createIndex( - "test", - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("ingestion_source.type", "kafka") - .put("ingestion_source.pointer.init.reset", "earliest") - .put("ingestion_source.param.topic", "test") - .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) - .build(), - "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" - ); + produceData("1", "name1", "24"); + produceData("2", "name2", "20"); + + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "earliest") + .put("ingestion_source.param.topic", "test") + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("index.replication.type", "SEGMENT") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); - RangeQueryBuilder query = new RangeQueryBuilder("age").gte(21); - await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { - refresh("test"); - SearchResponse response = client().prepareSearch("test").setQuery(query).get(); - assertThat(response.getHits().getTotalHits().value(), is(1L)); - }); - } finally { - stopKafka(); - } + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(21); + await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + refresh("test"); + SearchResponse response = client().prepareSearch("test").setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + }); } public void testKafkaIngestion_RewindByTimeStamp() { - try { - setupKafka(); - // create an index with ingestion source from kafka - createIndex( - "test_rewind_by_timestamp", - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("ingestion_source.type", "kafka") - .put("ingestion_source.pointer.init.reset", "rewind_by_timestamp") - // 1739459500000 is the timestamp of the first message - // 1739459800000 is the timestamp of the second message - // by resetting to 1739459600000, only the second message will be ingested - .put("ingestion_source.pointer.init.reset.value", "1739459600000") - .put("ingestion_source.param.topic", "test") - .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) - .put("ingestion_source.param.auto.offset.reset", "latest") - .build(), - "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" - ); + produceData("1", "name1", "24", 1739459500000L); + produceData("2", "name2", "20", 1739459800000L); + + // create an index with ingestion source from kafka + createIndex( + "test_rewind_by_timestamp", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "rewind_by_timestamp") + // 1739459500000 is 
the timestamp of the first message + // 1739459800000 is the timestamp of the second message + // by resetting to 1739459600000, only the second message will be ingested + .put("ingestion_source.pointer.init.reset.value", "1739459600000") + .put("ingestion_source.param.topic", "test") + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("ingestion_source.param.auto.offset.reset", "latest") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); - RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); - await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { - refresh("test_rewind_by_timestamp"); - SearchResponse response = client().prepareSearch("test_rewind_by_timestamp").setQuery(query).get(); - assertThat(response.getHits().getTotalHits().value(), is(1L)); - }); - } finally { - stopKafka(); - } + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); + await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + refresh("test_rewind_by_timestamp"); + SearchResponse response = client().prepareSearch("test_rewind_by_timestamp").setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + }); } public void testKafkaIngestion_RewindByOffset() { - try { - setupKafka(); - // create an index with ingestion source from kafka - createIndex( - "test_rewind_by_offset", - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("ingestion_source.type", "kafka") - .put("ingestion_source.pointer.init.reset", "rewind_by_offset") - .put("ingestion_source.pointer.init.reset.value", "1") - .put("ingestion_source.param.topic", "test") - .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) - .put("ingestion_source.param.auto.offset.reset", "latest") - .build(), - "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" - ); - - RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); - await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { - refresh("test_rewind_by_offset"); - SearchResponse response = client().prepareSearch("test_rewind_by_offset").setQuery(query).get(); - assertThat(response.getHits().getTotalHits().value(), is(1L)); - }); - } finally { - stopKafka(); - } - } - - private void setupKafka() { - kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:6.2.1")) - // disable topic auto creation - .withEnv("KAFKA_AUTO_CREATE_TOPICS_ENABLE", "false"); - kafka.start(); - prepareKafkaData(); - } - - private void stopKafka() { - if (kafka != null) { - kafka.stop(); - } - } - - private void prepareKafkaData() { - String boostrapServers = kafka.getBootstrapServers(); - KafkaUtils.createTopic(topicName, 1, boostrapServers); - Properties props = new Properties(); - props.put("bootstrap.servers", kafka.getBootstrapServers()); - Producer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); - producer.send( - new ProducerRecord<>(topicName, null, 1739459500000L, "null", "{\"_id\":\"1\",\"_source\":{\"name\":\"bob\", \"age\": 24}}") - ); - producer.send( - new ProducerRecord<>( - topicName, - null, - 1739459800000L, - "null", - "{\"_id\":\"2\", \"_op_type:\":\"index\",\"_source\":{\"name\":\"alice\", \"age\": 20}}" - ) + produceData("1", "name1", "24"); + produceData("2", "name2", "20"); + // create an index with ingestion source from kafka + createIndex( + "test_rewind_by_offset", + 
Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "rewind_by_offset") + .put("ingestion_source.pointer.init.reset.value", "1") + .put("ingestion_source.param.topic", "test") + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("ingestion_source.param.auto.offset.reset", "latest") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" ); - producer.close(); + + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); + await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + refresh("test_rewind_by_offset"); + SearchResponse response = client().prepareSearch("test_rewind_by_offset").setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + }); } } diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java new file mode 100644 index 0000000000000..087bc9786872f --- /dev/null +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kafka; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.serialization.StringSerializer; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; +import org.junit.Before; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Properties; +import java.util.concurrent.TimeUnit; + +import org.testcontainers.containers.KafkaContainer; +import org.testcontainers.utility.DockerImageName; + +/** + * Base test class for Kafka ingestion tests + */ +@ThreadLeakFilters(filters = TestContainerThreadLeakFilter.class) +public class KafkaIngestionBaseIT extends OpenSearchIntegTestCase { + static final String topicName = "test"; + static final String indexName = "testindex"; + static final String mapping = "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}"; + static final long defaultMessageTimestamp = 1739459500000L; + + protected KafkaContainer kafka; + protected Producer<String, String> producer; + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(KafkaPlugin.class); + } + + @Before + private void setup() { + setupKafka(); + } + + @After + private void cleanup() { + stopKafka(); + } + + private void setupKafka() { + kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:6.2.1")) + // disable topic auto creation + .withEnv("KAFKA_AUTO_CREATE_TOPICS_ENABLE", "false"); + kafka.start(); + + // setup producer + String bootstrapServers = kafka.getBootstrapServers(); + KafkaUtils.createTopic(topicName, 1, bootstrapServers); + Properties props = new Properties(); +
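// producer config is intentionally minimal: only bootstrap.servers is set, and the key/value serializers are supplied through the KafkaProducer constructor below +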
props.put("bootstrap.servers", kafka.getBootstrapServers()); + producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); + } + + private void stopKafka() { + if (producer != null) { + producer.close(); + } + + if (kafka != null) { + kafka.stop(); + } + } + + protected void produceData(String id, String name, String age) { + produceData(id, name, age, defaultMessageTimestamp); + } + + protected void produceData(String id, String name, String age, long timestamp) { + String payload = String.format( + Locale.ROOT, + "{\"_id\":\"%s\", \"_op_type:\":\"index\",\"_source\":{\"name\":\"%s\", \"age\": %s}}", + id, + name, + age + ); + producer.send(new ProducerRecord<>(topicName, null, timestamp, "null", payload)); + } + + protected void waitForSearchableDocs(long docCount, List nodes) throws Exception { + assertBusy(() -> { + for (String node : nodes) { + final SearchResponse response = client(node).prepareSearch(indexName).setSize(0).setPreference("_only_local").get(); + final long hits = response.getHits().getTotalHits().value(); + if (hits < docCount) { + fail("Expected search hits on node: " + node + " to be at least " + docCount + " but was: " + hits); + } + } + }, 1, TimeUnit.MINUTES); + } +} diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java new file mode 100644 index 0000000000000..a9f818a9ca825 --- /dev/null +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java @@ -0,0 +1,125 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kafka; + +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; +import java.util.Arrays; + +import static org.hamcrest.Matchers.is; + +/** + * Integration tests for segment replication with remote store using kafka as ingestion source. + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreKafkaIT extends KafkaIngestionBaseIT { + private static final String REPOSITORY_NAME = "test-remote-store-repo"; + private Path absolutePath; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + if (absolutePath == null) { + absolutePath = randomRepoPath().toAbsolutePath(); + } + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) + .build(); + } + + public void testSegmentReplicationWithRemoteStore() throws Exception { + // Step 1: Create primary and replica nodes. Create index with 1 replica and kafka as ingestion source. 
+ + internalCluster().startClusterManagerOnlyNode(); + final String nodeA = internalCluster().startDataOnlyNode(); + + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "earliest") + .put("ingestion_source.param.topic", topicName) + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("index.replication.type", "SEGMENT") + .build(), + mapping + ); + + ensureYellowAndNoInitializingShards(indexName); + final String nodeB = internalCluster().startDataOnlyNode(); + ensureGreen(indexName); + assertTrue(nodeA.equals(primaryNodeName(indexName))); + assertTrue(nodeB.equals(replicaNodeName(indexName))); + verifyRemoteStoreEnabled(nodeA); + verifyRemoteStoreEnabled(nodeB); + + // Step 2: Produce update messages and validate segment replication + + produceData("1", "name1", "24"); + produceData("2", "name2", "20"); + refresh(indexName); + waitForSearchableDocs(2, Arrays.asList(nodeA, nodeB)); + + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(21); + SearchResponse primaryResponse = client(nodeA).prepareSearch(indexName).setQuery(query).setPreference("_only_local").get(); + assertThat(primaryResponse.getHits().getTotalHits().value(), is(1L)); + SearchResponse replicaResponse = client(nodeB).prepareSearch(indexName).setQuery(query).setPreference("_only_local").get(); + assertThat(replicaResponse.getHits().getTotalHits().value(), is(1L)); + + // Step 3: Stop current primary node and validate replica promotion. + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeA)); + ensureYellowAndNoInitializingShards(indexName); + assertTrue(nodeB.equals(primaryNodeName(indexName))); + + // Step 4: Verify new primary node is able to index documents + + produceData("3", "name3", "30"); + produceData("4", "name4", "31"); + refresh(indexName); + waitForSearchableDocs(4, Arrays.asList(nodeB)); + + SearchResponse newPrimaryResponse = client(nodeB).prepareSearch(indexName).setQuery(query).setPreference("_only_local").get(); + assertThat(newPrimaryResponse.getHits().getTotalHits().value(), is(3L)); + + // Step 5: Add a new node and assign the replica shard. Verify node recovery works. + + final String nodeC = internalCluster().startDataOnlyNode(); + client().admin().cluster().prepareReroute().add(new AllocateReplicaAllocationCommand(indexName, 0, nodeC)).get(); + ensureGreen(indexName); + assertTrue(nodeC.equals(replicaNodeName(indexName))); + verifyRemoteStoreEnabled(nodeC); + + waitForSearchableDocs(4, Arrays.asList(nodeC)); + SearchResponse newReplicaResponse = client(nodeC).prepareSearch(indexName).setQuery(query).setPreference("_only_local").get(); + assertThat(newReplicaResponse.getHits().getTotalHits().value(), is(3L)); + + // Step 6: Produce new updates and verify segment replication works when primary and replica index are not empty. 
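+ // nodeA was stopped in step 3, so nodeB (promoted primary) and nodeC (newly added replica) are the only data nodes left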
+ produceData("5", "name5", "40"); + produceData("6", "name6", "41"); + refresh(indexName); + waitForSearchableDocs(6, Arrays.asList(nodeB, nodeC)); + } + + private void verifyRemoteStoreEnabled(String node) { + GetSettingsResponse settingsResponse = client(node).admin().indices().prepareGetSettings(indexName).get(); + String remoteStoreEnabled = settingsResponse.getIndexToSettings().get(indexName).get("index.remote_store.enabled"); + assertEquals("Remote store should be enabled", "true", remoteStoreEnabled); + } +} diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerWatchdogThreadLeakFilter.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerThreadLeakFilter.java similarity index 76% rename from plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerWatchdogThreadLeakFilter.java rename to plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerThreadLeakFilter.java index 50b88c6233a46..91e2c83ebfa48 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerWatchdogThreadLeakFilter.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerThreadLeakFilter.java @@ -13,11 +13,12 @@ /** * The {@link org.testcontainers.images.TimeLimitedLoggedPullImageResultCallback} instance used by test containers, * for example {@link org.testcontainers.containers.KafkaContainer} creates a watcher daemon thread which is never - * stopped. This filter excludes that thread from the thread leak detection logic. + * stopped. This filter excludes that thread from the thread leak detection logic. It also excludes ryuk resource reaper + * thread which is not closed on time. 
*/ -public final class TestContainerWatchdogThreadLeakFilter implements ThreadFilter { +public final class TestContainerThreadLeakFilter implements ThreadFilter { @Override public boolean reject(Thread t) { - return t.getName().startsWith("testcontainers-pull-watchdog-"); + return t.getName().startsWith("testcontainers-pull-watchdog-") || t.getName().startsWith("testcontainers-ryuk"); } } diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index b37281b9d1582..72b59ba88b4c2 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -8,145 +8,54 @@ package org.opensearch.index.engine; -import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.MergePolicy; -import org.apache.lucene.index.SegmentCommitInfo; -import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.ReferenceManager; -import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.LockObtainFailedException; -import org.apache.lucene.util.InfoStream; import org.opensearch.ExceptionsHelper; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IngestionSource; -import org.opensearch.common.Booleans; -import org.opensearch.common.Nullable; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.concurrent.GatedCloseable; -import org.opensearch.common.lucene.LoggerInfoStream; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ReleasableLock; -import org.opensearch.common.util.io.IOUtils; -import org.opensearch.core.common.unit.ByteSizeValue; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.IndexSettings; import org.opensearch.index.IngestionConsumerFactory; import org.opensearch.index.IngestionShardConsumer; import org.opensearch.index.IngestionShardPointer; import org.opensearch.index.mapper.DocumentMapperForType; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.ParseContext; -import org.opensearch.index.merge.MergeStats; -import org.opensearch.index.merge.OnGoingMerge; -import org.opensearch.index.seqno.SeqNoStats; -import org.opensearch.index.shard.OpenSearchMergePolicy; +import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.NoOpTranslogManager; import org.opensearch.index.translog.Translog; -import org.opensearch.index.translog.TranslogCorruptedException; +import org.opensearch.index.translog.TranslogDeletionPolicy; import org.opensearch.index.translog.TranslogManager; import org.opensearch.index.translog.TranslogStats; +import org.opensearch.index.translog.listener.CompositeTranslogEventListener; import org.opensearch.indices.pollingingest.DefaultStreamPoller; import org.opensearch.indices.pollingingest.StreamPoller; -import org.opensearch.search.suggest.completion.CompletionStats; -import 
org.opensearch.threadpool.ThreadPool; -import java.io.Closeable; import java.io.IOException; -import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.BiConsumer; import java.util.function.BiFunction; -import java.util.function.UnaryOperator; import static org.opensearch.index.translog.Translog.EMPTY_TRANSLOG_SNAPSHOT; /** * IngestionEngine is an engine that ingests data from a stream source. */ -public class IngestionEngine extends Engine { - - private volatile SegmentInfos lastCommittedSegmentInfos; - private final CompletionStatsCache completionStatsCache; - private final IndexWriter indexWriter; - private final OpenSearchReaderManager internalReaderManager; - private final ExternalReaderManager externalReaderManager; - private final Lock flushLock = new ReentrantLock(); - private final ReentrantLock optimizeLock = new ReentrantLock(); - private final OpenSearchConcurrentMergeScheduler mergeScheduler; - private final AtomicBoolean shouldPeriodicallyFlushAfterBigMerge = new AtomicBoolean(false); - private final TranslogManager translogManager; - private final DocumentMapperForType documentMapperForType; - private final IngestionConsumerFactory ingestionConsumerFactory; - private StreamPoller streamPoller; +public class IngestionEngine extends InternalEngine { - /** - * UUID value that is updated every time the engine is force merged. - */ - @Nullable - private volatile String forceMergeUUID; + private StreamPoller streamPoller; + private final IngestionConsumerFactory ingestionConsumerFactory; + private final DocumentMapperForType documentMapperForType; public IngestionEngine(EngineConfig engineConfig, IngestionConsumerFactory ingestionConsumerFactory) { super(engineConfig); - store.incRef(); - boolean success = false; - try { - this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); - this.completionStatsCache = new CompletionStatsCache(() -> acquireSearcher("completion_stats")); - IndexMetadata indexMetadata = engineConfig.getIndexSettings().getIndexMetadata(); - assert indexMetadata != null; - mergeScheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings()); - indexWriter = createWriter(); - externalReaderManager = createReaderManager(new InternalEngine.RefreshWarmerListener(logger, isClosed, engineConfig)); - internalReaderManager = externalReaderManager.internalReaderManager; - translogManager = new NoOpTranslogManager( - shardId, - readLock, - this::ensureOpen, - new TranslogStats(0, 0, 0, 0, 0), - EMPTY_TRANSLOG_SNAPSHOT - ); - documentMapperForType = engineConfig.getDocumentMapperForTypeSupplier().get(); - this.ingestionConsumerFactory = Objects.requireNonNull(ingestionConsumerFactory); - - success = true; - } catch (IOException | TranslogCorruptedException e) { - throw new EngineCreationFailureException(shardId, "failed to create engine", e); - } finally { - if (!success) { - if (streamPoller != null) { - try { - streamPoller.close(); - } catch (IOException e) { - logger.error("failed to close stream poller", e); - throw new RuntimeException(e); - } - } - if (!isClosed.get()) { - // failure, we need to dec the store reference - 
store.decRef(); - } - } - } + this.ingestionConsumerFactory = Objects.requireNonNull(ingestionConsumerFactory); + this.documentMapperForType = engineConfig.getDocumentMapperForTypeSupplier().get(); + } /** @@ -169,11 +78,11 @@ public void start() { engineConfig.getShardId().getId() ); logger.info("created ingestion consumer for shard [{}]", engineConfig.getShardId()); - - Map commitData = commitDataAsMap(); + Map commitData = commitDataAsMap(indexWriter); StreamPoller.ResetState resetState = ingestionSource.getPointerInitReset().getType(); IngestionShardPointer startPointer = null; Set persistedPointers = new HashSet<>(); + if (commitData.containsKey(StreamPoller.BATCH_START)) { // try recovering from commit data String batchStartStr = commitData.get(StreamPoller.BATCH_START); @@ -190,23 +99,13 @@ public void start() { String resetValue = ingestionSource.getPointerInitReset().getValue(); streamPoller = new DefaultStreamPoller(startPointer, persistedPointers, ingestionShardConsumer, this, resetState, resetValue); - streamPoller.start(); - } - private IndexWriter createWriter() throws IOException { - try { - final IndexWriterConfig iwc = getIndexWriterConfig(); - return createWriter(store.directory(), iwc); - } catch (LockObtainFailedException ex) { - logger.warn("could not lock IndexWriter", ex); - throw ex; + // Poller is only started on the primary shard. Replica shards will rely on segment replication. + if (!engineConfig.isReadOnlyReplica()) { + streamPoller.start(); } } - public DocumentMapperForType getDocumentMapperForType() { - return documentMapperForType; - } - protected Set fetchPersistedOffsets(DirectoryReader directoryReader, IngestionShardPointer batchStart) throws IOException { final IndexSearcher searcher = new IndexSearcher(directoryReader); @@ -228,195 +127,6 @@ protected Set fetchPersistedOffsets(DirectoryReader direc return result; } - /** - * a copy of ExternalReaderManager from InternalEngine - */ - @SuppressForbidden(reason = "reference counting is required here") - static final class ExternalReaderManager extends ReferenceManager { - private final BiConsumer refreshListener; - private final OpenSearchReaderManager internalReaderManager; - private boolean isWarmedUp; // guarded by refreshLock - - ExternalReaderManager( - OpenSearchReaderManager internalReaderManager, - BiConsumer refreshListener - ) throws IOException { - this.refreshListener = refreshListener; - this.internalReaderManager = internalReaderManager; - this.current = internalReaderManager.acquire(); // steal the reference without warming up - } - - @Override - protected OpenSearchDirectoryReader refreshIfNeeded(OpenSearchDirectoryReader referenceToRefresh) throws IOException { - // we simply run a blocking refresh on the internal reference manager and then steal it's reader - // it's a save operation since we acquire the reader which incs it's reference but then down the road - // steal it by calling incRef on the "stolen" reader - internalReaderManager.maybeRefreshBlocking(); - final OpenSearchDirectoryReader newReader = internalReaderManager.acquire(); - if (isWarmedUp == false || newReader != referenceToRefresh) { - boolean success = false; - try { - refreshListener.accept(newReader, isWarmedUp ? 
referenceToRefresh : null); - isWarmedUp = true; - success = true; - } finally { - if (success == false) { - internalReaderManager.release(newReader); - } - } - } - // nothing has changed - both ref managers share the same instance so we can use reference equality - if (referenceToRefresh == newReader) { - internalReaderManager.release(newReader); - return null; - } else { - return newReader; // steal the reference - } - } - - @Override - protected boolean tryIncRef(OpenSearchDirectoryReader reference) { - return reference.tryIncRef(); - } - - @Override - protected int getRefCount(OpenSearchDirectoryReader reference) { - return reference.getRefCount(); - } - - @Override - protected void decRef(OpenSearchDirectoryReader reference) throws IOException { - reference.decRef(); - } - } - - private ExternalReaderManager createReaderManager(InternalEngine.RefreshWarmerListener externalRefreshListener) throws EngineException { - boolean success = false; - OpenSearchReaderManager internalReaderManager = null; - try { - try { - final OpenSearchDirectoryReader directoryReader = OpenSearchDirectoryReader.wrap( - DirectoryReader.open(indexWriter), - shardId - ); - internalReaderManager = new OpenSearchReaderManager(directoryReader); - lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); - ExternalReaderManager externalReaderManager = new ExternalReaderManager(internalReaderManager, externalRefreshListener); - success = true; - return externalReaderManager; - } catch (IOException e) { - maybeFailEngine("start", e); - try { - indexWriter.rollback(); - } catch (IOException inner) { // iw is closed below - e.addSuppressed(inner); - } - throw new EngineCreationFailureException(shardId, "failed to open reader on writer", e); - } - } finally { - if (success == false) { // release everything we created on a failure - IOUtils.closeWhileHandlingException(internalReaderManager, indexWriter); - } - } - } - - // pkg-private for testing - IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException { - return new IndexWriter(directory, iwc); - } - - private IndexWriterConfig getIndexWriterConfig() { - final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer()); - iwc.setCommitOnClose(false); // we by default don't commit on close - iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); - // with tests.verbose, lucene sets this up: plumb to align with filesystem stream - boolean verbose = false; - try { - verbose = Boolean.parseBoolean(System.getProperty("tests.verbose")); - } catch (Exception ignore) {} - iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger)); - iwc.setMergeScheduler(mergeScheduler); - // set merge scheduler - MergePolicy mergePolicy = config().getMergePolicy(); - boolean shuffleForcedMerge = Booleans.parseBoolean(System.getProperty("opensearch.shuffle_forced_merge", Boolean.TRUE.toString())); - if (shuffleForcedMerge) { - // We wrap the merge policy for all indices even though it is mostly useful for time-based indices - // but there should be no overhead for other type of indices so it's simpler than adding a setting - // to enable it. 
- mergePolicy = new ShuffleForcedMergePolicy(mergePolicy); - } - - if (config().getIndexSettings().isMergeOnFlushEnabled()) { - final long maxFullFlushMergeWaitMillis = config().getIndexSettings().getMaxFullFlushMergeWaitTime().millis(); - if (maxFullFlushMergeWaitMillis > 0) { - iwc.setMaxFullFlushMergeWaitMillis(maxFullFlushMergeWaitMillis); - final Optional> mergeOnFlushPolicy = config().getIndexSettings().getMergeOnFlushPolicy(); - if (mergeOnFlushPolicy.isPresent()) { - mergePolicy = mergeOnFlushPolicy.get().apply(mergePolicy); - } - } - } else { - // Disable merge on refresh - iwc.setMaxFullFlushMergeWaitMillis(0); - } - - iwc.setCheckPendingFlushUpdate(config().getIndexSettings().isCheckPendingFlushEnabled()); - iwc.setMergePolicy(new OpenSearchMergePolicy(mergePolicy)); - iwc.setSimilarity(engineConfig.getSimilarity()); - iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac()); - iwc.setCodec(engineConfig.getCodec()); - iwc.setUseCompoundFile(engineConfig.useCompoundFile()); - if (config().getIndexSort() != null) { - iwc.setIndexSort(config().getIndexSort()); - } - if (config().getLeafSorter() != null) { - iwc.setLeafSorter(config().getLeafSorter()); // The default segment search order - } - - return new IndexWriterConfig(new StandardAnalyzer()); - } - - @Override - public TranslogManager translogManager() { - // ingestion engine does not have translog - return translogManager; - } - - @Override - protected SegmentInfos getLastCommittedSegmentInfos() { - return lastCommittedSegmentInfos; - } - - @Override - protected SegmentInfos getLatestSegmentInfos() { - throw new UnsupportedOperationException(); - } - - @Override - public String getHistoryUUID() { - return loadHistoryUUID(lastCommittedSegmentInfos.userData); - } - - @Override - public long getWritingBytes() { - return 0; - } - - @Override - public CompletionStats completionStats(String... 
fieldNamePatterns) { - return completionStatsCache.get(fieldNamePatterns); - } - - @Override - public long getIndexThrottleTimeInMillis() { - return 0; - } - - @Override - public boolean isThrottled() { - return false; - } - @Override public IndexResult index(Index index) throws IOException { assert Objects.equals(index.uid().field(), IdFieldMapper.NAME) : index.uid().field(); @@ -457,16 +167,6 @@ public GetResult get(Get get, BiFunction search return getFromSearcher(get, searcherFactory, SearcherScope.EXTERNAL); } - @Override - protected ReferenceManager getReferenceManager(SearcherScope scope) { - return externalReaderManager; - } - - @Override - public Closeable acquireHistoryRetentionLock() { - throw new UnsupportedOperationException("Not implemented"); - } - @Override public Translog.Snapshot newChangesSnapshot( String source, @@ -475,199 +175,36 @@ public Translog.Snapshot newChangesSnapshot( boolean requiredFullRange, boolean accurateCount ) throws IOException { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public int countNumberOfHistoryOperations(String source, long fromSeqNo, long toSeqNumber) throws IOException { - return 0; - } - - @Override - public boolean hasCompleteOperationHistory(String reason, long startingSeqNo) { - return false; - } - - @Override - public long getMinRetainedSeqNo() { - return 0; - } - - @Override - public long getPersistedLocalCheckpoint() { - return 0; - } - - @Override - public long getProcessedLocalCheckpoint() { - return 0; - } - - @Override - public SeqNoStats getSeqNoStats(long globalCheckpoint) { - return null; - } - - @Override - public long getLastSyncedGlobalCheckpoint() { - return 0; - } - - @Override - public long getIndexBufferRAMBytesUsed() { - return 0; - } - - @Override - public List segments(boolean verbose) { - try (ReleasableLock lock = readLock.acquire()) { - Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose); - - // fill in the merges flag - Set onGoingMerges = mergeScheduler.onGoingMerges(); - for (OnGoingMerge onGoingMerge : onGoingMerges) { - for (SegmentCommitInfo segmentInfoPerCommit : onGoingMerge.getMergedSegments()) { - for (Segment segment : segmentsArr) { - if (segment.getName().equals(segmentInfoPerCommit.info.name)) { - segment.mergeId = onGoingMerge.getId(); - break; - } - } - } - } - return Arrays.asList(segmentsArr); - } - } - - @Override - public void refresh(String source) throws EngineException { - refresh(source, SearcherScope.EXTERNAL, true); - } - - final boolean refresh(String source, SearcherScope scope, boolean block) throws EngineException { - boolean refreshed; - try { - // refresh does not need to hold readLock as ReferenceManager can handle correctly if the engine is closed in mid-way. - if (store.tryIncRef()) { - // increment the ref just to ensure nobody closes the store during a refresh - try { - // even though we maintain 2 managers we really do the heavy-lifting only once. - // the second refresh will only do the extra work we have to do for warming caches etc. 
- ReferenceManager referenceManager = getReferenceManager(scope); - // it is intentional that we never refresh both internal / external together - if (block) { - referenceManager.maybeRefreshBlocking(); - refreshed = true; - } else { - refreshed = referenceManager.maybeRefresh(); - } - } finally { - store.decRef(); - } - } else { - refreshed = false; - } - } catch (AlreadyClosedException e) { - failOnTragicEvent(e); - throw e; - } catch (Exception e) { - try { - failEngine("refresh failed source[" + source + "]", e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - throw new RefreshFailedEngineException(shardId, e); - } - // We check for pruning in each delete request, but we also prune here e.g. in case a delete burst comes in and then no more deletes - // for a long time: - maybePruneDeletes(); - // TODO: use OS merge scheduler - mergeScheduler.refreshConfig(); - return refreshed; - } - - @Override - public boolean maybeRefresh(String source) throws EngineException { - return refresh(source, SearcherScope.EXTERNAL, false); - } - - @Override - public void writeIndexingBuffer() throws EngineException { - refresh("write indexing buffer", SearcherScope.INTERNAL, false); - } - - @Override - public boolean shouldPeriodicallyFlush() { - return false; - } - - @Override - public void flush(boolean force, boolean waitIfOngoing) throws EngineException { - ensureOpen(); - if (force && waitIfOngoing == false) { - assert false : "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing; - throw new IllegalArgumentException( - "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing - ); - } - try (ReleasableLock lock = readLock.acquire()) { - ensureOpen(); - if (flushLock.tryLock() == false) { - // if we can't get the lock right away we block if needed otherwise barf - if (waitIfOngoing == false) { - return; - } - logger.trace("waiting for in-flight flush to finish"); - flushLock.lock(); - logger.trace("acquired flush lock after blocking"); - } else { - logger.trace("acquired flush lock immediately"); - } - try { - // Only flush if (1) Lucene has uncommitted docs, or (2) forced by caller, - // - // do we need to consider #3 and #4 as in InternalEngine? - // (3) the newly created commit points to a different translog generation (can free translog), - // or (4) the local checkpoint information in the last commit is stale, which slows down future recoveries. - boolean hasUncommittedChanges = indexWriter.hasUncommittedChanges(); - if (hasUncommittedChanges || force) { - logger.trace("starting commit for flush;"); - - // TODO: do we need to close the latest commit as done in InternalEngine? - commitIndexWriter(indexWriter); - - logger.trace("finished commit for flush"); - - // a temporary debugging to investigate test failure - issue#32827. Remove when the issue is resolved - logger.debug("new commit on flush, hasUncommittedChanges:{}, force:{}", hasUncommittedChanges, force); - - // we need to refresh in order to clear older version values - refresh("version_table_flush", SearcherScope.INTERNAL, true); - } - } catch (FlushFailedEngineException ex) { - maybeFailEngine("flush", ex); - throw ex; - } catch (IOException e) { - throw new FlushFailedEngineException(shardId, e); - } finally { - flushLock.unlock(); - } - } + return EMPTY_TRANSLOG_SNAPSHOT; } /** - * Commits the specified index writer. 
- * - * @param writer the index writer to commit + * This method is a copy of commitIndexWriter method from {@link InternalEngine} with some additions for ingestion + * source. */ - protected void commitIndexWriter(final IndexWriter writer) throws IOException { + @Override + protected void commitIndexWriter(final IndexWriter writer, final String translogUUID) throws IOException { try { + final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); writer.setLiveCommitData(() -> { /* - * The user data captured the min and max range of the stream poller + * The user data captured above (e.g. local checkpoint) contains data that must be evaluated *before* Lucene flushes + * segments, including the local checkpoint amongst other values. The maximum sequence number is different, we never want + * the maximum sequence number to be less than the last sequence number to go into a Lucene commit, otherwise we run the + * risk of re-using a sequence number for two different documents when restoring from this commit point and subsequently + * writing new documents to the index. Since we only know which Lucene documents made it into the final commit after the + * {@link IndexWriter#commit()} call flushes all documents, we defer computation of the maximum sequence number to the time + * of invocation of the commit data iterator (which occurs after all documents have been flushed to Lucene). */ - final Map commitData = new HashMap<>(2); - + final Map commitData = new HashMap<>(7); + commitData.put(Translog.TRANSLOG_UUID_KEY, translogUUID); + commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(localCheckpoint)); + commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(localCheckpointTracker.getMaxSeqNo())); + commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get())); + commitData.put(HISTORY_UUID_KEY, historyUUID); + commitData.put(Engine.MIN_RETAINED_SEQNO, Long.toString(softDeletesPolicy.getMinRetainedSeqNo())); + + // ingestion engine needs to record batch start pointer commitData.put(StreamPoller.BATCH_START, streamPoller.getBatchStartPointer().asString()); final String currentForceMergeUUID = forceMergeUUID; if (currentForceMergeUUID != null) { @@ -676,6 +213,7 @@ protected void commitIndexWriter(final IndexWriter writer) throws IOException { logger.trace("committing writer with commit data [{}]", commitData); return commitData.entrySet().iterator(); }); + shouldPeriodicallyFlushAfterBigMerge.set(false); writer.commit(); } catch (final Exception ex) { try { @@ -703,268 +241,6 @@ protected void commitIndexWriter(final IndexWriter writer) throws IOException { } } - @Override - public MergeStats getMergeStats() { - return mergeScheduler.stats(); - } - - @Override - public void onSettingsChanged(TimeValue translogRetentionAge, ByteSizeValue translogRetentionSize, long softDeletesRetentionOps) { - mergeScheduler.refreshConfig(); - // TODO: do we need more? - } - - protected Map commitDataAsMap() { - return commitDataAsMap(indexWriter); - } - - /** - * Gets the commit data from {@link IndexWriter} as a map. 
- */ - protected static Map commitDataAsMap(final IndexWriter indexWriter) { - final Map commitData = new HashMap<>(8); - for (Map.Entry entry : indexWriter.getLiveCommitData()) { - commitData.put(entry.getKey(), entry.getValue()); - } - return commitData; - } - - @Override - public void forceMerge( - boolean flush, - int maxNumSegments, - boolean onlyExpungeDeletes, - boolean upgrade, - boolean upgradeOnlyAncientSegments, - String forceMergeUUID - ) throws EngineException, IOException { - /* - * We do NOT acquire the readlock here since we are waiting on the merges to finish - * that's fine since the IW.rollback should stop all the threads and trigger an IOException - * causing us to fail the forceMerge - * - * The way we implement upgrades is a bit hackish in the sense that we set an instance - * variable and that this setting will thus apply to the next forced merge that will be run. - * This is ok because (1) this is the only place we call forceMerge, (2) we have a single - * thread for optimize, and the 'optimizeLock' guarding this code, and (3) ConcurrentMergeScheduler - * syncs calls to findForcedMerges. - */ - assert indexWriter.getConfig().getMergePolicy() instanceof OpenSearchMergePolicy : "MergePolicy is " - + indexWriter.getConfig().getMergePolicy().getClass().getName(); - OpenSearchMergePolicy mp = (OpenSearchMergePolicy) indexWriter.getConfig().getMergePolicy(); - optimizeLock.lock(); - try { - ensureOpen(); - if (upgrade) { - logger.info("starting segment upgrade upgradeOnlyAncientSegments={}", upgradeOnlyAncientSegments); - mp.setUpgradeInProgress(true, upgradeOnlyAncientSegments); - } - store.incRef(); // increment the ref just to ensure nobody closes the store while we optimize - try { - if (onlyExpungeDeletes) { - assert upgrade == false; - indexWriter.forceMergeDeletes(true /* blocks and waits for merges*/); - } else if (maxNumSegments <= 0) { - assert upgrade == false; - indexWriter.maybeMerge(); - } else { - indexWriter.forceMerge(maxNumSegments, true /* blocks and waits for merges*/); - this.forceMergeUUID = forceMergeUUID; - } - if (flush) { - flush(false, true); - } - if (upgrade) { - logger.info("finished segment upgrade"); - } - } finally { - store.decRef(); - } - } catch (AlreadyClosedException ex) { - /* in this case we first check if the engine is still open. If so this exception is just fine - * and expected. We don't hold any locks while we block on forceMerge otherwise it would block - * closing the engine as well. If we are not closed we pass it on to failOnTragicEvent which ensures - * we are handling a tragic even exception here */ - ensureOpen(ex); - failOnTragicEvent(ex); - throw ex; - } catch (Exception e) { - try { - maybeFailEngine(FORCE_MERGE, e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - throw e; - } finally { - try { - // reset it just to make sure we reset it in a case of an error - mp.setUpgradeInProgress(false, false); - } finally { - optimizeLock.unlock(); - } - } - } - - @Override - public GatedCloseable acquireLastIndexCommit(boolean flushFirst) throws EngineException { - store.incRef(); - try { - var reader = getReferenceManager(SearcherScope.INTERNAL).acquire(); - return new GatedCloseable<>(reader.getIndexCommit(), () -> { - store.decRef(); - getReferenceManager(SearcherScope.INTERNAL).release(reader); - }); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public GatedCloseable acquireSafeIndexCommit() throws EngineException { - // TODO: do we need this? 
likely not - return acquireLastIndexCommit(false); - } - - @Override - public SafeCommitInfo getSafeCommitInfo() { - // TODO: do we need this? - return SafeCommitInfo.EMPTY; - } - - @Override - protected void closeNoLock(String reason, CountDownLatch closedLatch) { - if (isClosed.compareAndSet(false, true)) { - assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() - : "Either the write lock must be held or the engine must be currently be failing itself"; - try { - try { - IOUtils.close(externalReaderManager, internalReaderManager); - } catch (Exception e) { - logger.warn("Failed to close ReaderManager", e); - } - - // no need to commit in this case!, we snapshot before we close the shard, so translog and all sync'ed - logger.trace("rollback indexWriter"); - try { - indexWriter.rollback(); - } catch (AlreadyClosedException ex) { - failOnTragicEvent(ex); - throw ex; - } - logger.trace("rollback indexWriter done"); - } catch (Exception e) { - logger.warn("failed to rollback writer on close", e); - } finally { - try { - store.decRef(); - logger.debug("engine closed [{}]", reason); - } finally { - closedLatch.countDown(); - } - } - } - } - - private boolean failOnTragicEvent(AlreadyClosedException ex) { - final boolean engineFailed; - // if we are already closed due to some tragic exception - // we need to fail the engine. it might have already been failed before - // but we are double-checking it's failed and closed - if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) { - final Exception tragicException; - if (indexWriter.getTragicException() instanceof Exception) { - tragicException = (Exception) indexWriter.getTragicException(); - } else { - tragicException = new RuntimeException(indexWriter.getTragicException()); - } - failEngine("already closed by tragic event on the index writer", tragicException); - engineFailed = true; - } else if (failedEngine.get() == null && isClosed.get() == false) { // we are closed but the engine is not failed yet? - // this smells like a bug - we only expect ACE if we are in a fatal case ie. either translog or IW is closed by - // a tragic event or has closed itself. 
if that is not the case we are in a buggy state and raise an assertion error - throw new AssertionError("Unexpected AlreadyClosedException", ex); - } else { - engineFailed = false; - } - return engineFailed; - } - - private final class EngineMergeScheduler extends OpenSearchConcurrentMergeScheduler { - private final AtomicInteger numMergesInFlight = new AtomicInteger(0); - private final AtomicBoolean isThrottling = new AtomicBoolean(); - - EngineMergeScheduler(ShardId shardId, IndexSettings indexSettings) { - super(shardId, indexSettings); - } - - @Override - public synchronized void beforeMerge(OnGoingMerge merge) { - int maxNumMerges = mergeScheduler.getMaxMergeCount(); - if (numMergesInFlight.incrementAndGet() > maxNumMerges) { - if (isThrottling.getAndSet(true) == false) { - logger.info("now throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges); - activateThrottling(); - } - } - } - - @Override - public synchronized void afterMerge(OnGoingMerge merge) { - int maxNumMerges = mergeScheduler.getMaxMergeCount(); - if (numMergesInFlight.decrementAndGet() < maxNumMerges) { - if (isThrottling.getAndSet(false)) { - logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges); - deactivateThrottling(); - } - } - if (indexWriter.hasPendingMerges() == false - && System.nanoTime() - lastWriteNanos >= engineConfig.getFlushMergesAfter().nanos()) { - // NEVER do this on a merge thread since we acquire some locks blocking here and if we concurrently rollback the writer - // we deadlock on engine#close for instance. - engineConfig.getThreadPool().executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - if (isClosed.get() == false) { - logger.warn("failed to flush after merge has finished"); - } - } - - @Override - protected void doRun() { - // if we have no pending merges and we are supposed to flush once merges have finished to - // free up transient disk usage of the (presumably biggish) segments that were just merged - flush(); - } - }); - } else if (merge.getTotalBytesSize() >= engineConfig.getIndexSettings().getFlushAfterMergeThresholdSize().getBytes()) { - // we hit a significant merge which would allow us to free up memory if we'd commit it hence on the next change - // we should execute a flush on the next operation if that's a flush after inactive or indexing a document. - // we could fork a thread and do it right away but we try to minimize forking and piggyback on outside events. - shouldPeriodicallyFlushAfterBigMerge.set(true); - } - } - - @Override - protected void handleMergeException(final Throwable exc) { - engineConfig.getThreadPool().generic().execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - logger.debug("merge failure action rejected", e); - } - - @Override - protected void doRun() throws Exception { - /* - * We do this on another thread rather than the merge thread that we are initially called on so that we have complete - * confidence that the call stack does not contain catch statements that would cause the error that might be thrown - * here from being caught and never reaching the uncaught exception handler. - */ - failEngine(MERGE_FAILED, new MergePolicy.MergeException(exc)); - } - }); - } - } - @Override public void activateThrottling() { // TODO: add this when we have a thread pool for indexing in parallel @@ -975,38 +251,41 @@ public void deactivateThrottling() { // TODO: is this needed? 
} - @Override - public int fillSeqNoGaps(long primaryTerm) throws IOException { - // TODO: is this needed? - return 0; - } - @Override public void maybePruneDeletes() { // no need to prune deletes in ingestion engine } @Override - public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) { - // TODO: is this needed? + public void close() throws IOException { + if (streamPoller != null) { + streamPoller.close(); + } + super.close(); } - @Override - public long getMaxSeqNoOfUpdatesOrDeletes() { - // TODO: is this needed? - return 0; + public DocumentMapperForType getDocumentMapperForType() { + return documentMapperForType; } @Override - public void advanceMaxSeqNoOfUpdatesOrDeletes(long maxSeqNoOfUpdatesOnPrimary) { - // TODO: is this needed? + protected TranslogManager createTranslogManager( + String translogUUID, + TranslogDeletionPolicy translogDeletionPolicy, + CompositeTranslogEventListener translogEventListener + ) throws IOException { + return new NoOpTranslogManager( + shardId, + readLock, + this::ensureOpen, + new TranslogStats(), + EMPTY_TRANSLOG_SNAPSHOT, + translogUUID, + true + ); } - @Override - public void close() throws IOException { - if (streamPoller != null) { - streamPoller.close(); - } - super.close(); + protected Map commitDataAsMap() { + return commitDataAsMap(indexWriter); } } diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index ff790fa1513f1..064e757c6ebb7 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -144,16 +144,28 @@ */ public class InternalEngine extends Engine { + /** + * UUID value that is updated every time the engine is force merged. + */ + @Nullable + protected volatile String forceMergeUUID; + /** * When we last pruned expired tombstones from versionMap.deletes: */ private volatile long lastDeleteVersionPruneTimeMSec; - private final InternalTranslogManager translogManager; - private final OpenSearchConcurrentMergeScheduler mergeScheduler; + protected final TranslogManager translogManager; + protected final IndexWriter indexWriter; + protected final LocalCheckpointTracker localCheckpointTracker; + protected final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1); + protected final SoftDeletesPolicy softDeletesPolicy; + protected final AtomicBoolean shouldPeriodicallyFlushAfterBigMerge = new AtomicBoolean(false); - private final IndexWriter indexWriter; + @Nullable + protected final String historyUUID; + private final OpenSearchConcurrentMergeScheduler mergeScheduler; private final ExternalReaderManager externalReaderManager; private final OpenSearchReaderManager internalReaderManager; @@ -168,15 +180,12 @@ public class InternalEngine extends Engine { private final IndexThrottle throttle; - private final LocalCheckpointTracker localCheckpointTracker; - private final CombinedDeletionPolicy combinedDeletionPolicy; // How many callers are currently requesting index throttling. Currently there are only two situations where we do this: when merges // are falling behind and when writing indexing buffer to disk is too slow. 
When this is 0, there is no throttling, else we throttling // incoming indexing ops to a single thread: private final AtomicInteger throttleRequestCount = new AtomicInteger(); - private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1); private final AtomicLong maxSeenAutoIdTimestamp = new AtomicLong(-1); // max_seq_no_of_updates_or_deletes tracks the max seq_no of update or delete operations that have been processed in this engine. // An index request is considered as an update if it overwrites existing documents with the same docId in the Lucene index. @@ -189,14 +198,12 @@ public class InternalEngine extends Engine { private final CounterMetric numDocAppends = new CounterMetric(); private final CounterMetric numDocUpdates = new CounterMetric(); private final NumericDocValuesField softDeletesField = Lucene.newSoftDeletesField(); - private final SoftDeletesPolicy softDeletesPolicy; private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener; private final CompletionStatsCache completionStatsCache; private final AtomicBoolean trackTranslogLocation = new AtomicBoolean(false); private final KeyedLock noOpKeyedLock = new KeyedLock<>(); - private final AtomicBoolean shouldPeriodicallyFlushAfterBigMerge = new AtomicBoolean(false); /** * If multiple writes passed {@link InternalEngine#tryAcquireInFlightDocs(Operation, int)} but they haven't adjusted @@ -210,15 +217,6 @@ public class InternalEngine extends Engine { private final int maxDocs; - @Nullable - private final String historyUUID; - - /** - * UUID value that is updated every time the engine is force merged. - */ - @Nullable - private volatile String forceMergeUUID; - public InternalEngine(EngineConfig engineConfig) { this(engineConfig, IndexWriter.MAX_DOCS, LocalCheckpointTracker::new, TranslogEventListener.NOOP_TRANSLOG_EVENT_LISTENER); } @@ -249,7 +247,7 @@ public TranslogManager translogManager() { ExternalReaderManager externalReaderManager = null; OpenSearchReaderManager internalReaderManager = null; EngineMergeScheduler scheduler = null; - InternalTranslogManager translogManagerRef = null; + TranslogManager translogManagerRef = null; boolean success = false; try { this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().relativeTimeInMillis(); @@ -280,20 +278,11 @@ public void onFailure(String reason, Exception ex) { } } }; - translogManagerRef = new InternalTranslogManager( - engineConfig.getTranslogConfig(), - engineConfig.getPrimaryTermSupplier(), - engineConfig.getGlobalCheckpointSupplier(), - translogDeletionPolicy, - shardId, - readLock, - this::getLocalCheckpointTracker, - translogUUID, - new CompositeTranslogEventListener(Arrays.asList(internalTranslogEventListener, translogEventListener), shardId), - this::ensureOpen, - engineConfig.getTranslogFactory(), - engineConfig.getStartedPrimarySupplier() + CompositeTranslogEventListener compositeTranslogEventListener = new CompositeTranslogEventListener( + Arrays.asList(internalTranslogEventListener, translogEventListener), + shardId ); + translogManagerRef = createTranslogManager(translogUUID, translogDeletionPolicy, compositeTranslogEventListener); this.translogManager = translogManagerRef; this.softDeletesPolicy = newSoftDeletesPolicy(); this.combinedDeletionPolicy = new CombinedDeletionPolicy( @@ -362,6 +351,27 @@ public void onFailure(String reason, Exception ex) { logger.trace("created new InternalEngine"); } + protected TranslogManager createTranslogManager( + String translogUUID, + TranslogDeletionPolicy translogDeletionPolicy, + 
CompositeTranslogEventListener translogEventListener + ) throws IOException { + return new InternalTranslogManager( + engineConfig.getTranslogConfig(), + engineConfig.getPrimaryTermSupplier(), + engineConfig.getGlobalCheckpointSupplier(), + translogDeletionPolicy, + shardId, + readLock, + this::getLocalCheckpointTracker, + translogUUID, + translogEventListener, + this::ensureOpen, + engineConfig.getTranslogFactory(), + engineConfig.getStartedPrimarySupplier() + ); + } + private LocalCheckpointTracker createLocalCheckpointTracker( BiFunction localCheckpointTrackerSupplier ) throws IOException { @@ -2773,7 +2783,7 @@ public Closeable acquireHistoryRetentionLock() { /** * Gets the commit data from {@link IndexWriter} as a map. */ - private static Map commitDataAsMap(final IndexWriter indexWriter) { + protected static Map commitDataAsMap(final IndexWriter indexWriter) { final Map commitData = new HashMap<>(8); for (Map.Entry entry : indexWriter.getLiveCommitData()) { commitData.put(entry.getKey(), entry.getValue()); diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index e2210217672ef..d2c81c4274ebd 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -21,7 +21,6 @@ import org.opensearch.index.translog.listener.TranslogEventListener; import org.opensearch.index.translog.transfer.TranslogUploadFailedException; -import java.io.Closeable; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BooleanSupplier; @@ -31,12 +30,12 @@ import java.util.stream.Stream; /** - * The {@link TranslogManager} implementation capable of orchestrating all read/write {@link Translog} operations while - * interfacing with the {@link org.opensearch.index.engine.InternalEngine} + * The {@link TranslogManager} implementation capable of orchestrating all read/write {@link Translog} operations for + * the {@link org.opensearch.index.engine.InternalEngine} * * @opensearch.internal */ -public class InternalTranslogManager implements TranslogManager, Closeable { +public class InternalTranslogManager implements TranslogManager { private final ReleasableLock readLock; private final LifecycleAware engineLifeCycleAware; diff --git a/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java index b4aa7865570a6..7ae80f88b0595 100644 --- a/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java @@ -11,6 +11,7 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.seqno.SequenceNumbers; import java.io.IOException; import java.util.stream.Stream; @@ -27,6 +28,9 @@ public class NoOpTranslogManager implements TranslogManager { private final Runnable ensureOpen; private final ShardId shardId; private final TranslogStats translogStats; + private final TranslogDeletionPolicy translogDeletionPolicy; + private final String translogUUID; + private final boolean skipRecoveryStep; public NoOpTranslogManager( ShardId shardId, @@ -34,12 +38,27 @@ public NoOpTranslogManager( Runnable ensureOpen, TranslogStats 
translogStats, Translog.Snapshot emptyTranslogSnapshot + ) throws IOException { + this(shardId, readLock, ensureOpen, translogStats, emptyTranslogSnapshot, "", false); + } + + public NoOpTranslogManager( + ShardId shardId, + ReleasableLock readLock, + Runnable ensureOpen, + TranslogStats translogStats, + Translog.Snapshot emptyTranslogSnapshot, + String translogUUID, + boolean skipRecoveryStep ) throws IOException { this.emptyTranslogSnapshot = emptyTranslogSnapshot; this.readLock = readLock; this.shardId = shardId; this.ensureOpen = ensureOpen; this.translogStats = translogStats; + this.translogDeletionPolicy = new DefaultTranslogDeletionPolicy(0, 0, 0); + this.translogUUID = translogUUID; + this.skipRecoveryStep = skipRecoveryStep; } @Override @@ -48,6 +67,11 @@ public void rollTranslogGeneration() throws TranslogException {} @Override public int recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long localCheckpoint, long recoverUpToSeqNo) throws IOException { + // skip translog recovery attempt when skipRecoveryStep is true + if (skipRecoveryStep) { + return 0; + } + try (ReleasableLock ignored = readLock.acquire()) { ensureOpen.run(); try (Translog.Snapshot snapshot = emptyTranslogSnapshot) { @@ -132,6 +156,42 @@ public Releasable drainSync() { @Override public Translog.TranslogGeneration getTranslogGeneration() { + return new Translog.TranslogGeneration(translogUUID, 0); + } + + @Override + public long getLastSyncedGlobalCheckpoint() { + return 0; + } + + @Override + public long getMaxSeqNo() { + return SequenceNumbers.NO_OPS_PERFORMED; + } + + @Override + public void trimUnreferencedReaders() throws IOException {} + + @Override + public boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long flushThreshold) { + return false; + } + + @Override + public Exception getTragicExceptionIfClosed() { return null; } + + @Override + public TranslogDeletionPolicy getDeletionPolicy() { + return translogDeletionPolicy; + } + + @Override + public String getTranslogUUID() { + return translogUUID; + } + + @Override + public void close() throws IOException {} } diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index ffda06d8d8292..b1e88624c9906 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -899,6 +899,8 @@ public TranslogDeletionPolicy getDeletionPolicy() { return deletionPolicy; } + public static final Translog.Location EMPTY_TRANSLOG_LOCATION = new Translog.Location(0, 0, 0); + /** * Location in the translot * diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java index e1a0b7d1c1293..ec312636e7ee1 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java @@ -11,6 +11,7 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; +import java.io.Closeable; import java.io.IOException; import java.util.stream.Stream; @@ -20,7 +21,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public interface TranslogManager { +public interface TranslogManager extends Closeable { /** * Rolls the translog generation and cleans unneeded. 
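A minimal sketch of the pattern introduced above: a closeable translog manager whose recovery step can be short-circuited by a flag, as the new NoOpTranslogManager constructor does. The interface and class names here (RecoveryRunner, MinimalTranslogManager, NoOpManagerSketch) are illustrative only, not types from this patch:

    import java.io.Closeable;
    import java.io.IOException;

    // Illustrative stand-ins; the real TranslogManager carries many more methods.
    interface RecoveryRunner {
        int replay() throws IOException;
    }

    interface MinimalTranslogManager extends Closeable {
        int recoverFromTranslog(RecoveryRunner runner) throws IOException;
    }

    final class NoOpManagerSketch implements MinimalTranslogManager {
        private final boolean skipRecoveryStep;

        NoOpManagerSketch(boolean skipRecoveryStep) {
            this.skipRecoveryStep = skipRecoveryStep;
        }

        @Override
        public int recoverFromTranslog(RecoveryRunner runner) throws IOException {
            if (skipRecoveryStep) {
                return 0; // nothing to replay, mirroring the guard added above
            }
            return runner.replay();
        }

        @Override
        public void close() {
            // no resources held; real implementations release translog readers here
        }
    }

Because the interface itself is now Closeable, an engine can release whichever manager it holds through the same close() call, without knowing the concrete type.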
@@ -142,4 +143,46 @@ public interface TranslogManager { Releasable drainSync(); Translog.TranslogGeneration getTranslogGeneration(); + + /** + * Retrieves last synced global checkpoint. + */ + long getLastSyncedGlobalCheckpoint(); + + /** + * Retrieves the max seq no. + */ + long getMaxSeqNo(); + + /** + * Trims unreferenced translog generations by asking {@link TranslogDeletionPolicy} for the minimum required + * generation. + */ + void trimUnreferencedReaders() throws IOException; + + /** + * + * @param localCheckpointOfLastCommit local checkpoint reference of last commit to translog + * @param flushThreshold threshold to flush to translog + * @return if the translog should be flushed + */ + boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long flushThreshold); + + /** + * Retrieves the underlying translog tragic exception + * @return the tragic exception + */ + Exception getTragicExceptionIfClosed(); + + /** + * Retrieves the translog deletion policy + * @return TranslogDeletionPolicy + */ + TranslogDeletionPolicy getDeletionPolicy(); + + /** + * Retrieves the translog unique identifier + * @return the uuid of the translog + */ + String getTranslogUUID(); } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/IngestionEngineFactory.java b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionEngineFactory.java index e124adb90365b..16688feddf53c 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/IngestionEngineFactory.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionEngineFactory.java @@ -13,6 +13,7 @@ import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.engine.IngestionEngine; +import org.opensearch.index.engine.NRTReplicationEngine; import java.util.Objects; @@ -29,6 +30,10 @@ public IngestionEngineFactory(IngestionConsumerFactory ingestionConsumerFactory) @Override public Engine newReadWriteEngine(EngineConfig config) { + if (config.isReadOnlyReplica()) { + return new NRTReplicationEngine(config); + } + IngestionEngine ingestionEngine = new IngestionEngine(config, ingestionConsumerFactory); ingestionEngine.start(); return ingestionEngine; diff --git a/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java b/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java index 19718384bd926..2d00bbcba0c8c 100644 --- a/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java @@ -36,8 +36,9 @@ import java.util.concurrent.atomic.AtomicLong; import static org.awaitility.Awaitility.await; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; public class IngestionEngineTests extends EngineTestCase { @@ -46,6 +47,7 @@ public class IngestionEngineTests extends EngineTestCase { private IngestionEngine ingestionEngine; // the messages of the stream to ingest from private List messages; + private EngineConfig engineConfig; @Override @Before @@ -86,6 +88,7 @@ public void tearDown() throws Exception { ingestionEngineStore.close(); } super.tearDown(); + engineConfig = null; } public void testCreateEngine() throws IOException { @@ -95,7 +98,7 @@ public void testCreateEngine() throws IOException { ingestionEngine.flush(false, true); Map commitData = ingestionEngine.commitDataAsMap(); // 
verify the commit data - Assert.assertEquals(1, commitData.size()); + Assert.assertEquals(7, commitData.size()); Assert.assertEquals("2", commitData.get(StreamPoller.BATCH_START)); // verify the stored offsets @@ -120,21 +123,19 @@ public void testRecovery() throws IOException { publishData("{\"_id\":\"3\",\"_source\":{\"name\":\"john\", \"age\": 30}}"); publishData("{\"_id\":\"4\",\"_source\":{\"name\":\"jane\", \"age\": 25}}"); ingestionEngine.close(); - ingestionEngine = buildIngestionEngine(new AtomicLong(2), ingestionEngineStore, indexSettings); + ingestionEngine = buildIngestionEngine(new AtomicLong(0), ingestionEngineStore, indexSettings); waitForResults(ingestionEngine, 4); } public void testCreationFailure() throws IOException { - // Simulate an error scenario - Store mockStore = mock(Store.class); - doThrow(new IOException("Simulated IOException")).when(mockStore).readLastCommittedSegmentsInfo(); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - FakeIngestionSource.FakeIngestionConsumerFactory consumerFactory = new FakeIngestionSource.FakeIngestionConsumerFactory(messages); + Store mockStore = spy(store); + doThrow(new IOException("Simulated IOException")).when(mockStore).trimUnsafeCommits(any()); + EngineConfig engineConfig = config( indexSettings, - store, + mockStore, createTempDir(), NoMergePolicy.INSTANCE, null, @@ -156,7 +157,9 @@ public void testCreationFailure() throws IOException { private IngestionEngine buildIngestionEngine(AtomicLong globalCheckpoint, Store store, IndexSettings settings) throws IOException { FakeIngestionSource.FakeIngestionConsumerFactory consumerFactory = new FakeIngestionSource.FakeIngestionConsumerFactory(messages); - EngineConfig engineConfig = config(settings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get); + if (engineConfig == null) { + engineConfig = config(settings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get); + } // overwrite the config with ingestion engine settings String mapping = "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}"; MapperService mapperService = createMapperService(mapping); diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 4f04c0b08fd0a..f9a09c088095b 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -98,6 +98,7 @@ import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineTestCase; +import org.opensearch.index.engine.IngestionEngine; import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.seqno.SequenceNumbers; @@ -1378,7 +1379,9 @@ private void assertOpenTranslogReferences() throws Exception { for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { try { - if (IndexShardTestCase.getEngine(indexShard) instanceof InternalEngine) { + if (IndexShardTestCase.getEngine(indexShard) instanceof IngestionEngine) { + // no-op, as IngestionEngine does not use translog. 
+ } else if (IndexShardTestCase.getEngine(indexShard) instanceof InternalEngine) { IndexShardTestCase.getTranslog(indexShard).getDeletionPolicy().assertNoOpenTranslogRefs(); } } catch (AlreadyClosedException ok) { From b1e66b34f2f29952ac04a411267562a3dbc54976 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Thu, 27 Feb 2025 14:48:51 -0800 Subject: [PATCH 29/48] Move TSC took-time policy to guard both heap and disk tier (#17190) * Move TSC took-time policy to guard both heap and disk tier Signed-off-by: Peter Alfonsi * changelog Signed-off-by: Peter Alfonsi * spotless apply Signed-off-by: Peter Alfonsi * Addressed Sagar's comment Signed-off-by: Peter Alfonsi * Add missing javadoc Signed-off-by: Peter Alfonsi * address round 2 of comments Signed-off-by: Peter Alfonsi * Add removal notification to put() Signed-off-by: Peter Alfonsi * Fix incorrect stats hit when cache entry rejected by policy Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * Fixed more broken stats Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * Addressed more comments Signed-off-by: Peter Alfonsi * make policy rejections count as neither hit or miss Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * remove potential double-loading Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * remove removalNotification Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi --------- Signed-off-by: Peter Alfonsi Signed-off-by: Peter Alfonsi Co-authored-by: Peter Alfonsi --- CHANGELOG.md | 1 + .../common/tier/TieredSpilloverCacheIT.java | 88 ++++- .../tier/TieredSpilloverCacheStatsIT.java | 85 ++-- .../cache/common/policy/TookTimePolicy.java | 14 +- .../common/tier/TieredSpilloverCache.java | 147 ++++--- .../tier/TieredSpilloverCachePlugin.java | 2 + .../tier/TieredSpilloverCacheSettings.java | 40 +- .../common/policy/TookTimePolicyTests.java | 46 ++- .../tier/TieredSpilloverCacheTests.java | 364 +++++++++++++++--- 9 files changed, 579 insertions(+), 208 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6aa18ce0064ae..bd218393919bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) - Increase force merge threads to 1/8th of cores [#17255](https://github.com/opensearch-project/OpenSearch/pull/17255) +- TieredSpilloverCache took-time threshold now guards heap tier as well as disk tier [#17190](https://github.com/opensearch-project/OpenSearch/pull/17190) ### Deprecated diff --git a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java index f0ea21bde187e..08458cd2a054d 100644 --- a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java +++ b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java @@ -118,7 +118,7 @@ public void testSanityChecksWithIndicesRequestCache() throws InterruptedExceptio ); } - public void testWithDynamicTookTimePolicyWithMultiSegments() throws Exception { + public void testWithDynamicDiskTookTimePolicyWithMultiSegments() throws Exception { int numberOfSegments = 
getNumberOfSegments(); int onHeapCacheSizePerSegmentInBytes = 800; // Per cache entry below is around ~700 bytes, so keeping this // just a bit higher so that each segment can atleast hold 1 entry. @@ -139,12 +139,13 @@ public void testWithDynamicTookTimePolicyWithMultiSegments() throws Exception { ) .get() ); - // Set a very high value for took time policy so that no items evicted from onHeap cache are spilled + // Set a very high value for took time disk policy so that no items evicted from onHeap cache are spilled // to disk. And then hit requests so that few items are cached into cache. ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), new TimeValue(100, TimeUnit.SECONDS) ) .build() @@ -182,12 +183,13 @@ public void testWithDynamicTookTimePolicyWithMultiSegments() throws Exception { assertEquals(0, requestCacheStats.getHitCount()); long lastEvictionSeen = requestCacheStats.getEvictions(); - // Decrease took time policy to zero so that disk cache also comes into play. Now we should be able + // Decrease disk took time policy to zero so that disk cache also comes into play. Now we should be able // to cache all entries. updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), new TimeValue(0, TimeUnit.MILLISECONDS) ) .build() @@ -206,7 +208,7 @@ public void testWithDynamicTookTimePolicyWithMultiSegments() throws Exception { assertEquals(lastEvictionSeen, requestCacheStats.getEvictions()); } - public void testWithDynamicTookTimePolicy() throws Exception { + public void testWithDynamicHeapTookTimePolicy() throws Exception { int onHeapCacheSizeInBytes = 2000; internalCluster().startNode(Settings.builder().put(defaultSettings(onHeapCacheSizeInBytes + "b", 1)).build()); Client client = client(); @@ -224,8 +226,7 @@ public void testWithDynamicTookTimePolicy() throws Exception { ) .get() ); - // Step 1 : Set a very high value for took time policy so that no items evicted from onHeap cache are spilled - // to disk. And then hit requests so that few items are cached into cache. + // Set a high threshold for the overall cache took time policy so nothing will enter the cache. 
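For reference, the dynamic-settings update these tests repeat can be condensed as below. This is a sketch assuming the imports already present in the test file; the Client named client and the threshold values are illustrative:

    // Raise the overall took-time gate (blocks all admissions) while zeroing the
    // disk gate (admits any heap eviction); both settings are dynamic.
    static void setTookTimeThresholds(Client client) throws Exception {
        Settings thresholds = Settings.builder()
            .put(
                TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(),
                new TimeValue(100, TimeUnit.SECONDS) // overall gate: effectively blocks all admissions
            )
            .put(
                TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(),
                TimeValue.ZERO // disk gate: admits anything evicted from the heap tier
            )
            .build();
        client.admin().cluster().updateSettings(new ClusterUpdateSettingsRequest().transientSettings(thresholds)).get();
    }

Raising the overall threshold gates admission to the heap tier, and therefore to the cache as a whole, while the disk threshold only gates spillover from heap to disk.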
ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( @@ -245,6 +246,57 @@ public void testWithDynamicTookTimePolicy() throws Exception { ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); long perQuerySizeInCacheInBytes = -1; + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + SearchResponse resp = client.prepareSearch("index") + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator)) + .get(); + assertSearchResponse(resp); + } + RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index"); + assertEquals(0, requestCacheStats.getEvictions()); + } + + public void testWithDynamicDiskTookTimePolicy() throws Exception { + int onHeapCacheSizeInBytes = 2000; + internalCluster().startNode(Settings.builder().put(defaultSettings(onHeapCacheSizeInBytes + "b", 1)).build()); + Client client = client(); + assertAcked( + client.admin() + .indices() + .prepareCreate("index") + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", -1) + ) + .get() + ); + // Step 1 : Set a very high value for disk took time policy so that no items evicted from onHeap cache are spilled + // to disk. And then hit requests so that few items are cached into cache. + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( + Settings.builder() + .put( + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), + new TimeValue(100, TimeUnit.SECONDS) + ) + .build() + ); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).get()); + int numberOfIndexedItems = randomIntBetween(6, 10); + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + indexRandom(true, client.prepareIndex("index").setSource("k" + iterator, "hello" + iterator)); + } + ensureSearchable("index"); + refreshAndWaitForReplication(); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); + long perQuerySizeInCacheInBytes = -1; for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { SearchResponse resp = client.prepareSearch("index") .setRequestCache(true) @@ -282,12 +334,13 @@ public void testWithDynamicTookTimePolicy() throws Exception { assertEquals(0, requestCacheStats.getHitCount()); long lastEvictionSeen = requestCacheStats.getEvictions(); - // Step 3: Decrease took time policy to zero so that disk cache also comes into play. Now we should be able + // Step 3: Decrease disk took time policy to zero so that disk cache also comes into play. Now we should be able // to cache all entries. 
updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), new TimeValue(0, TimeUnit.MILLISECONDS) ) .build() @@ -352,11 +405,12 @@ public void testInvalidationWithIndicesRequestCache() throws Exception { ) .get() ); - // Update took time policy to zero so that all entries are eligible to be cached on disk. + // Update disk took time policy to zero so that all entries are eligible to be cached on disk. ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), new TimeValue(0, TimeUnit.MILLISECONDS) ) .build() @@ -437,11 +491,12 @@ public void testWithExplicitCacheClear() throws Exception { ) .get() ); - // Update took time policy to zero so that all entries are eligible to be cached on disk. + // Update disk took time policy to zero so that all entries are eligible to be cached on disk. ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), new TimeValue(0, TimeUnit.MILLISECONDS) ) .build() @@ -512,11 +567,12 @@ public void testWithDynamicDiskCacheSetting() throws Exception { ) .get() ); - // Update took time policy to zero so that all entries are eligible to be cached on disk. + // Update disk took time policy to zero so that all entries are eligible to be cached on disk. 
ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), new TimeValue(0, TimeUnit.MILLISECONDS) ) .build() diff --git a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java index fa10f4185521a..c72fc0d529c03 100644 --- a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java +++ b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java @@ -62,16 +62,7 @@ protected Collection> nodePlugins() { * Test aggregating by indices */ public void testIndicesLevelAggregation() throws Exception { - internalCluster().startNodes( - 1, - Settings.builder() - .put(defaultSettings(HEAP_CACHE_SIZE_STRING, 1)) - .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(0, TimeUnit.SECONDS) - ) - .build() - ); + startNodesDefaultSettings(); Client client = client(); Map values = setupCacheForAggregationTests(client); @@ -115,16 +106,7 @@ public void testIndicesLevelAggregation() throws Exception { * Test aggregating by indices and tier */ public void testIndicesAndTierLevelAggregation() throws Exception { - internalCluster().startNodes( - 1, - Settings.builder() - .put(defaultSettings(HEAP_CACHE_SIZE_STRING, 1)) - .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(0, TimeUnit.SECONDS) - ) - .build() - ); + startNodesDefaultSettings(); Client client = client(); Map values = setupCacheForAggregationTests(client); @@ -195,16 +177,7 @@ public void testIndicesAndTierLevelAggregation() throws Exception { * Test aggregating by tier only */ public void testTierLevelAggregation() throws Exception { - internalCluster().startNodes( - 1, - Settings.builder() - .put(defaultSettings(HEAP_CACHE_SIZE_STRING, 1)) - .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(0, TimeUnit.SECONDS) - ) - .build() - ); + startNodesDefaultSettings(); Client client = client(); Map values = setupCacheForAggregationTests(client); // Get values for tiers alone and check they add correctly across indices @@ -236,16 +209,7 @@ public void testTierLevelAggregation() throws Exception { } public void testInvalidLevelsAreIgnored() throws Exception { - internalCluster().startNodes( - 1, - Settings.builder() - .put(defaultSettings(HEAP_CACHE_SIZE_STRING, getNumberOfSegments())) - .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(0, TimeUnit.SECONDS) - ) - .build() - ); + startNodesDefaultSettings(); Client client = client(); Map values = setupCacheForAggregationTests(client); @@ -287,16 +251,7 @@ public void testInvalidLevelsAreIgnored() throws Exception { * Check the new stats API returns the same values as the old stats API. 
*/ public void testStatsMatchOldApi() throws Exception { - internalCluster().startNodes( - 1, - Settings.builder() - .put(defaultSettings(HEAP_CACHE_SIZE_STRING, getNumberOfSegments())) - .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(0, TimeUnit.SECONDS) - ) - .build() - ); + startNodesDefaultSettings(); String index = "index"; Client client = client(); startIndex(client, index); @@ -354,7 +309,12 @@ public void testStatsWithMultipleSegments() throws Exception { .put(defaultSettings(heap_cache_size_per_segment * numberOfSegments + "B", numberOfSegments)) .put( TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(0, TimeUnit.SECONDS) + TimeValue.ZERO + ) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), + TimeValue.ZERO ) .build() ); @@ -429,6 +389,11 @@ public void testClosingShard() throws Exception { TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), new TimeValue(0, TimeUnit.SECONDS) ) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), + new TimeValue(0, TimeUnit.SECONDS) + ) .put(INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), new TimeValue(1)) .build() ); @@ -631,4 +596,22 @@ private static ImmutableCacheStatsHolder getNodeCacheStatsResult(Client client, NodeCacheStats ncs = nodeStatsResponse.getNodes().get(0).getNodeCacheStats(); return ncs.getStatsByCache(CacheType.INDICES_REQUEST_CACHE); } + + private void startNodesDefaultSettings() { + internalCluster().startNodes( + 1, + Settings.builder() + .put(defaultSettings(HEAP_CACHE_SIZE_STRING, 1)) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TimeValue.ZERO + ) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), + TimeValue.ZERO + ) + .build() + ); + } } diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java index 4bc26803acf4c..620b5597086f4 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java @@ -13,16 +13,14 @@ package org.opensearch.cache.common.policy; -import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; import java.util.function.Function; import java.util.function.Predicate; -import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; - /** * A cache tier policy which accepts queries whose took time is greater than some threshold. * The threshold should be set to approximately the time it takes to get a result from the cache tier. 
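As a standalone illustration of the admission check this javadoc describes, the sketch below mirrors the policy's logic with simplified names; the real class reads its threshold from a dynamic cluster setting (see the hunk that follows) rather than a plain field:

    import java.util.function.Function;
    import java.util.function.Predicate;

    // Illustrative sketch of a took-time admission gate.
    final class TookTimeGate<V> implements Predicate<V> {
        private final long thresholdNanos;
        private final Function<V, Long> tookTimeNanosParser;

        TookTimeGate(long thresholdNanos, Function<V, Long> tookTimeNanosParser) {
            if (thresholdNanos < 0) {
                throw new IllegalArgumentException("Threshold must be >= 0");
            }
            this.thresholdNanos = thresholdNanos;
            this.tookTimeNanosParser = tookTimeNanosParser;
        }

        @Override
        public boolean test(V value) {
            if (thresholdNanos == 0) {
                return true; // zero threshold admits everything without parsing the value
            }
            Long tookTimeNanos = tookTimeNanosParser.apply(value);
            // A missing or negative took time never clears a positive threshold.
            return tookTimeNanos != null && tookTimeNanos >= thresholdNanos;
        }
    }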
@@ -46,20 +44,20 @@ public class TookTimePolicy<V> implements Predicate<V> {
      * @param threshold the threshold
      * @param cachedResultParser the function providing policy values
      * @param clusterSettings cluster settings
-     * @param cacheType cache type
+     * @param targetSetting the cluster setting to register a consumer with
      */
     public TookTimePolicy(
         TimeValue threshold,
         Function<V, CachedQueryResult.PolicyValues> cachedResultParser,
         ClusterSettings clusterSettings,
-        CacheType cacheType
+        Setting<TimeValue> targetSetting
     ) {
         if (threshold.compareTo(TimeValue.ZERO) < 0) {
             throw new IllegalArgumentException("Threshold for TookTimePolicy must be >= 0ms but was " + threshold.getStringRep());
         }
         this.threshold = threshold;
         this.cachedResultParser = cachedResultParser;
-        clusterSettings.addSettingsUpdateConsumer(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType), this::setThreshold);
+        clusterSettings.addSettingsUpdateConsumer(targetSetting, this::setThreshold);
     }
 
     private void setThreshold(TimeValue threshold) {
@@ -72,6 +70,10 @@ private void setThreshold(TimeValue threshold) {
      * @return whether to admit the data
      */
     public boolean test(V data) {
+        if (threshold.equals(TimeValue.ZERO)) {
+            // Skip parsing the took time if this threshold is zero.
+            return true;
+        }
         long tookTimeNanos;
         try {
             tookTimeNanos = cachedResultParser.apply(data).getTookTimeNanos();
diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java
index 9879235812377..d968e61cffcff 100644
--- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java
+++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java
@@ -53,6 +53,8 @@
 import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_SIZE;
 import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE;
 import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_SEGMENTS;
+import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP;
+import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP;
 import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK;
 import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP;
 import static org.opensearch.common.cache.settings.CacheSettings.INVALID_SEGMENT_COUNT_EXCEPTION_MESSAGE;
@@ -145,9 +147,12 @@ static class TieredSpilloverCacheSegment<K, V> implements ICache<K, V> {
         ReleasableLock writeLock = new ReleasableLock(readWriteLock.writeLock());
 
         private final Map<ICache<K, V>, TierInfo> caches;
-
+        // Policies guarding access to the cache overall.
         private final List<Predicate<V>> policies;
+        // Policies guarding access to the disk tier.
+        private final List<Predicate<V>> diskPolicies;
+
         private final TieredSpilloverCacheStatsHolder statsHolder;
 
         private final long onHeapCacheMaxWeight;
@@ -157,7 +162,7 @@ static class TieredSpilloverCacheSegment<K, V> implements ICache<K, V> {
          * This map is used to handle concurrent requests for same key in computeIfAbsent() to ensure we load the value
          * only once.
*/ - Map, CompletableFuture, V>>> completableFutureMap = new ConcurrentHashMap<>(); + Map, CompletableFuture, V>, Boolean>>> completableFutureMap = new ConcurrentHashMap<>(); TieredSpilloverCacheSegment( Builder builder, @@ -220,7 +225,8 @@ static class TieredSpilloverCacheSegment implements ICache { cacheListMap.put(onHeapCache, new TierInfo(true, TIER_DIMENSION_VALUE_ON_HEAP)); cacheListMap.put(diskCache, new TierInfo(isDiskCacheEnabled, TIER_DIMENSION_VALUE_DISK)); this.caches = Collections.synchronizedMap(cacheListMap); - this.policies = builder.policies; // Will never be null; builder initializes it to an empty list + this.policies = builder.policies; + this.diskPolicies = builder.diskPolicies; // Will never be null; builder initializes it to an empty list this.onHeapCacheMaxWeight = onHeapCacheSizeInBytes; this.diskCacheMaxWeight = diskCacheSizeInBytes; } @@ -255,18 +261,19 @@ public V get(ICacheKey key) { public void put(ICacheKey key, V value) { // First check in case the key is already present in either of tiers. Tuple cacheValueTuple = getValueFromTieredCache(true).apply(key); - if (cacheValueTuple == null) { - // In case it is not present in any tier, put it inside onHeap cache by default. - try (ReleasableLock ignore = writeLock.acquire()) { - onHeapCache.put(key, value); - } - updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, value); - } else { - // Put it inside desired tier. - try (ReleasableLock ignore = writeLock.acquire()) { - for (Map.Entry, TierInfo> entry : this.caches.entrySet()) { - if (cacheValueTuple.v2().equals(entry.getValue().tierName)) { - entry.getKey().put(key, value); + if (evaluatePoliciesList(value, policies)) { + if (cacheValueTuple == null) { + // In case it is not present in any tier, put it inside onHeap cache by default. + try (ReleasableLock ignore = writeLock.acquire()) { + onHeapCache.put(key, value); + } + updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, value); + } else { + try (ReleasableLock ignore = writeLock.acquire()) { + for (Map.Entry, TierInfo> entry : this.caches.entrySet()) { + if (cacheValueTuple.v2().equals(entry.getValue().tierName)) { + entry.getKey().put(key, value); + } } } updateStatsOnPut(cacheValueTuple.v2(), key, value); @@ -281,7 +288,7 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V> // getValueFromTieredCache(), // we will see all misses. Instead, handle stats in computeIfAbsent(). Tuple cacheValueTuple; - CompletableFuture, V>> future = null; + CompletableFuture, V>, Boolean>> future = null; try (ReleasableLock ignore = readLock.acquire()) { cacheValueTuple = getValueFromTieredCache(false).apply(key); if (cacheValueTuple == null) { @@ -297,22 +304,25 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V> // Add the value to the onHeap cache. We are calling computeIfAbsent which does another get inside. // This is needed as there can be many requests for the same key at the same time and we only want to load // the value once. - V value = compute(key, loader, future); - // Handle stats - if (loader.isLoaded()) { - // The value was just computed and added to the cache by this thread. 
Register a miss for the heap cache, and the disk - // cache - // if present - updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, value); - statsHolder.incrementMisses(heapDimensionValues); - if (caches.get(diskCache).isEnabled()) { - statsHolder.incrementMisses(diskDimensionValues); + Tuple> computedValueTuple = compute(key, loader, future); + boolean wasCacheMiss = computedValueTuple.v2().v1(); + boolean wasRejectedByPolicy = computedValueTuple.v2().v2(); + // If the value was rejected by policy, it counts as neither a hit or miss. + if (!wasRejectedByPolicy) { + // Handle stats + if (wasCacheMiss) { + // The value was just computed and added to the cache by this thread. + // Register a miss for the heap cache, and the disk cache if present + statsHolder.incrementMisses(heapDimensionValues); + if (caches.get(diskCache).isEnabled()) { + statsHolder.incrementMisses(diskDimensionValues); + } + } else { + // Another thread requesting this key already loaded the value. Register a hit for the heap cache + statsHolder.incrementHits(heapDimensionValues); } - } else { - // Another thread requesting this key already loaded the value. Register a hit for the heap cache - statsHolder.incrementHits(heapDimensionValues); } - return value; + return computedValueTuple.v1(); } else { // Handle stats for an initial hit from getValueFromTieredCache() if (cacheValueTuple.v2().equals(TIER_DIMENSION_VALUE_ON_HEAP)) { @@ -327,20 +337,33 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V> return cacheValueTuple.v1(); } - private V compute(ICacheKey key, LoadAwareCacheLoader, V> loader, CompletableFuture, V>> future) - throws Exception { - // Handler to handle results post-processing. Takes a tuple or exception as an input and returns - // the value. Also before returning value, puts the value in cache. - BiFunction, V>, Throwable, Void> handler = (pair, ex) -> { - if (pair != null) { + private Tuple> compute( + ICacheKey key, + LoadAwareCacheLoader, V> loader, + CompletableFuture, V>, Boolean>> future + ) throws Exception { + // Handler to handle results post-processing. Takes a Tuple, boolean>, where the boolean represents whether + // this key/value pair was rejected by the policies, + // or exception as an input and returns the value. Also before returning value, puts the value in cache if accepted by policies. + boolean wasCacheMiss = false; + boolean wasRejectedByPolicy = false; + BiFunction, V>, Boolean>, Throwable, Void> handler = (pairInfo, ex) -> { + Tuple, V> pair = pairInfo.v1(); + boolean rejectedByPolicy = pairInfo.v2(); + if (pair != null && !rejectedByPolicy) { + boolean didAddToCache = false; try (ReleasableLock ignore = writeLock.acquire()) { onHeapCache.put(pair.v1(), pair.v2()); + didAddToCache = true; } catch (Exception e) { // TODO: Catch specific exceptions to know whether this resulted from cache or underlying removal // listeners/stats. Needs better exception handling at underlying layers.For now swallowing // exception. 
logger.warn("Exception occurred while putting item onto heap cache", e); } + if (didAddToCache) { + updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, pair.v2()); + } } else { if (ex != null) { logger.warn("Exception occurred while trying to compute the value", ex); @@ -364,16 +387,20 @@ private V compute(ICacheKey key, LoadAwareCacheLoader, V> loader future.completeExceptionally(npe); throw new ExecutionException(npe); } else { - future.complete(new Tuple<>(key, value)); + wasRejectedByPolicy = !evaluatePoliciesList(value, policies); + future.complete(new Tuple<>(new Tuple<>(key, value), wasRejectedByPolicy)); + wasCacheMiss = !wasRejectedByPolicy; } } else { try { - value = future.get().v2(); + Tuple, V>, Boolean> futureTuple = future.get(); + wasRejectedByPolicy = futureTuple.v2(); + value = futureTuple.v1().v2(); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } - return value; + return new Tuple<>(value, new Tuple<>(wasCacheMiss, wasRejectedByPolicy)); } @Override @@ -442,7 +469,9 @@ void handleRemovalFromHeapTier(RemovalNotification, V> notification boolean wasEvicted = SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()); boolean countEvictionTowardsTotal = false; // Don't count this eviction towards the cache's total if it ends up in the disk tier boolean exceptionOccurredOnDiskCachePut = false; - boolean canCacheOnDisk = caches.get(diskCache).isEnabled() && wasEvicted && evaluatePolicies(notification.getValue()); + boolean canCacheOnDisk = caches.get(diskCache).isEnabled() + && wasEvicted + && evaluatePoliciesList(notification.getValue(), diskPolicies); if (canCacheOnDisk) { try (ReleasableLock ignore = writeLock.acquire()) { diskCache.put(key, notification.getValue()); // spill over to the disk tier and increment its stats @@ -465,8 +494,8 @@ void handleRemovalFromHeapTier(RemovalNotification, V> notification updateStatsOnRemoval(TIER_DIMENSION_VALUE_ON_HEAP, wasEvicted, key, notification.getValue(), countEvictionTowardsTotal); } - boolean evaluatePolicies(V value) { - for (Predicate policy : policies) { + boolean evaluatePoliciesList(V value, List> policiesList) { + for (Predicate policy : policiesList) { if (!policy.test(value)) { return false; } @@ -822,8 +851,8 @@ public ICache create(CacheConfig config, CacheType cacheType, } ICache.Factory diskCacheFactory = cacheFactories.get(diskCacheStoreName); - TimeValue diskPolicyThreshold = TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType) - .get(settings); + TimeValue tookTimePolicyThreshold = TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType).get(settings); + TimeValue tookTimeDiskPolicyThreshold = TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType).get(settings); Function cachedResultParser = Objects.requireNonNull( config.getCachedResultParser(), "Cached result parser fn can't be null" @@ -849,7 +878,22 @@ public ICache create(CacheConfig config, CacheType cacheType, .setCacheConfig(config) .setCacheType(cacheType) .setNumberOfSegments(numberOfSegments) - .addPolicy(new TookTimePolicy(diskPolicyThreshold, cachedResultParser, config.getClusterSettings(), cacheType)) + .addPolicy( + new TookTimePolicy<>( + tookTimePolicyThreshold, + cachedResultParser, + config.getClusterSettings(), + TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType) + ) + ) + .addDiskPolicy( + new TookTimePolicy<>( + tookTimeDiskPolicyThreshold, + cachedResultParser, + config.getClusterSettings(), + TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType) + ) + ) 
.setOnHeapCacheSizeInBytes(onHeapCacheSize) .setDiskCacheSize(diskCacheSize) .build(); @@ -873,7 +917,8 @@ public static class Builder { private CacheConfig cacheConfig; private CacheType cacheType; private Map cacheFactories; - private final ArrayList> policies = new ArrayList<>(); + private final List> policies = new ArrayList<>(); + private final List> diskPolicies = new ArrayList<>(); private int numberOfSegments; private long onHeapCacheSizeInBytes; @@ -945,7 +990,7 @@ public Builder setCacheFactories(Map cacheFactorie } /** - * Set a cache policy to be used to limit access to this cache's disk tier. + * Set a cache policy to be used to limit access to this cache. * @param policy the policy * @return builder */ @@ -955,12 +1000,12 @@ public Builder addPolicy(Predicate policy) { } /** - * Set multiple policies to be used to limit access to this cache's disk tier. - * @param policies the policies + * Set a cache policy to be used to limit access to this cache's disk tier. + * @param diskPolicy the policy * @return builder */ - public Builder addPolicies(List> policies) { - this.policies.addAll(policies); + public Builder addDiskPolicy(Predicate diskPolicy) { + this.diskPolicies.add(diskPolicy); return this; } diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java index bf522b42b70ca..d1d033fae8cd2 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java @@ -21,6 +21,7 @@ import java.util.Map; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; /** @@ -62,6 +63,7 @@ public List> getSettings() { TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) ); settingList.add(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType)); + settingList.add(TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType)); if (FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings)) { settingList.add(DISK_CACHE_ENABLED_SETTING_MAP.get(cacheType)); } diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java index 31dc1795134e4..790e2ead729fe 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java @@ -38,6 +38,16 @@ public class TieredSpilloverCacheSettings { */ public static final long MIN_DISK_CACHE_SIZE_IN_BYTES = 10485760L; + /** + * The default took time threshold for a value to enter the heap tier of the cache, and therefore to enter the cache at all. + */ + public static final TimeValue DEFAULT_TOOK_TIME_THRESHOLD = TimeValue.ZERO; + + /** + * The default took time threshold for a value to enter the disk tier of the cache. 
+     */
+    public static final TimeValue DEFAULT_TOOK_TIME_DISK_THRESHOLD = new TimeValue(10, TimeUnit.MILLISECONDS);
+
     /**
      * Setting which defines the onHeap cache store to be used in TieredSpilloverCache.
      *
@@ -109,13 +119,27 @@ public class TieredSpilloverCacheSettings {
     );
 
     /**
-     * Setting defining the minimum took time for a query to be allowed into the disk cache.
+     * Setting defining the minimum took time for a query to be allowed in the cache.
+     */
+    private static final Setting.AffixSetting<TimeValue> TIERED_SPILLOVER_TOOK_TIME_THRESHOLD = Setting.suffixKeySetting(
+        TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".policies.took_time.threshold",
+        (key) -> Setting.timeSetting(
+            key,
+            DEFAULT_TOOK_TIME_THRESHOLD,
+            TimeValue.ZERO, // Minimum value for this setting
+            NodeScope,
+            Setting.Property.Dynamic
+        )
+    );
+
+    /**
+     * Setting defining the minimum took time for a query to be allowed in the disk tier of the cache.
      */
     private static final Setting.AffixSetting<TimeValue> TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD = Setting.suffixKeySetting(
         TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".disk.store.policies.took_time.threshold",
         (key) -> Setting.timeSetting(
             key,
-            new TimeValue(10, TimeUnit.MILLISECONDS), // Default value for this setting
+            DEFAULT_TOOK_TIME_DISK_THRESHOLD,
             TimeValue.ZERO, // Minimum value for this setting
             NodeScope,
             Setting.Property.Dynamic
@@ -128,6 +152,12 @@ public class TieredSpilloverCacheSettings {
      */
     public static final Map<CacheType, Setting<TimeValue>> TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP;
 
+    /**
+     * Stores took time policy settings for the disk tiers of various cache types as these are dynamic so that they can be registered and
+     * retrieved accordingly.
+     */
+    public static final Map<CacheType, Setting<TimeValue>> TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP;
+
     /**
      * Stores disk cache enabled settings for various cache types as these are dynamic so that they can be registered and
      * retrieved accordingly.
@@ -139,9 +169,14 @@ public class TieredSpilloverCacheSettings { */ static { Map> concreteTookTimePolicySettingMap = new HashMap<>(); + Map> concreteDiskTookTimePolicySettingMap = new HashMap<>(); Map> diskCacheSettingMap = new HashMap<>(); for (CacheType cacheType : CacheType.values()) { concreteTookTimePolicySettingMap.put( + cacheType, + TIERED_SPILLOVER_TOOK_TIME_THRESHOLD.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) + ); + concreteDiskTookTimePolicySettingMap.put( cacheType, TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) ); @@ -151,6 +186,7 @@ public class TieredSpilloverCacheSettings { ); } TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP = concreteTookTimePolicySettingMap; + TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP = concreteDiskTookTimePolicySettingMap; DISK_CACHE_ENABLED_SETTING_MAP = diskCacheSettingMap; } diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java index 000067280e50d..535274b30f2d9 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java @@ -11,7 +11,6 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; -import org.opensearch.common.Randomness; import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -20,7 +19,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.query.QuerySearchResult; import org.opensearch.test.OpenSearchTestCase; @@ -28,7 +26,7 @@ import java.io.IOException; import java.util.HashSet; -import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; @@ -52,7 +50,12 @@ public void setup() { } private TookTimePolicy getTookTimePolicy(TimeValue threshold) { - return new TookTimePolicy<>(threshold, transformationFunction, clusterSettings, CacheType.INDICES_REQUEST_CACHE); + return new TookTimePolicy<>( + threshold, + transformationFunction, + clusterSettings, + TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + ); } public void testTookTimePolicy() throws Exception { @@ -75,17 +78,31 @@ public void testTookTimePolicy() throws Exception { assertTrue(longResult); } - public void testNegativeOneInput() throws Exception { - // PolicyValues with -1 took time can be passed to this policy if we shouldn't accept it for whatever reason - TookTimePolicy tookTimePolicy = getTookTimePolicy(TimeValue.ZERO); - BytesReference minusOne = getValidPolicyInput(-1L); - assertFalse(tookTimePolicy.test(minusOne)); - } - public void testInvalidThreshold() throws Exception { assertThrows(IllegalArgumentException.class, () -> getTookTimePolicy(TimeValue.MINUS_ONE)); } + public void testZeroThresholdSkipsCheck() throws Exception { + AtomicInteger numChecksRun = new AtomicInteger(); + Function 
dummyTransformationFunction = (data) -> { + numChecksRun.incrementAndGet(); + try { + return CachedQueryResult.getPolicyValues(data); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + TookTimePolicy policy = new TookTimePolicy<>( + TimeValue.ZERO, + dummyTransformationFunction, + clusterSettings, + TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + ); + BytesReference minusOne = getValidPolicyInput(-1L); + assertTrue(policy.test(minusOne)); + assertEquals(0, numChecksRun.get()); + } + private BytesReference getValidPolicyInput(Long tookTimeNanos) throws IOException { // When it's used in the cache, the policy will receive BytesReferences which come from // serializing a CachedQueryResult. @@ -109,11 +126,4 @@ private QuerySearchResult getQSR() { ); return mockQSR; } - - private void writeRandomBytes(StreamOutput out, int numBytes) throws IOException { - Random rand = Randomness.get(); - byte[] bytes = new byte[numBytes]; - rand.nextBytes(bytes); - out.writeBytes(bytes); - } } diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index 494534ac74c9f..c74eb371709f6 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -24,6 +24,7 @@ import org.opensearch.common.cache.store.OpenSearchOnHeapCache; import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -57,10 +58,12 @@ import java.util.function.Predicate; import static org.opensearch.cache.common.tier.TieredSpilloverCache.ZERO_SEGMENT_COUNT_EXCEPTION_MESSAGE; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DEFAULT_TOOK_TIME_DISK_THRESHOLD; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.MIN_DISK_CACHE_SIZE_IN_BYTES; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_SEGMENTS; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_NAME; import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK; @@ -83,6 +86,7 @@ public void setup() { Settings settings = Settings.EMPTY; clusterSettings = new ClusterSettings(settings, new HashSet<>()); clusterSettings.registerSetting(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE)); + clusterSettings.registerSetting(TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE)); 
clusterSettings.registerSetting(DISK_CACHE_ENABLED_SETTING_MAP.get(CacheType.INDICES_REQUEST_CACHE)); } @@ -191,8 +195,8 @@ public void testComputeIfAbsentWithFactoryBasedCacheCreation() throws Exception .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) - .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken - // 20_000_000 ns = 20 ms to compute + // Values will always appear to have taken 2x the took time threshold to compute, so they will be admitted + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(DEFAULT_TOOK_TIME_DISK_THRESHOLD.getNanos() * 2)) .setClusterSettings(clusterSettings) .setStoragePath(storagePath) .build(), @@ -291,8 +295,8 @@ public void testComputeIfAbsentWithSegmentedCache() throws Exception { .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) - .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken - // 20_000_000 ns = 20 ms to compute + // Values will always appear to have taken 2x the took time threshold to compute, so they will be admitted + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(DEFAULT_TOOK_TIME_DISK_THRESHOLD.getNanos() * 2)) .setClusterSettings(clusterSettings) .setStoragePath(storagePath) .setSegmentCount(numberOfSegments) @@ -1155,6 +1159,7 @@ public void testComputeIfAbsentWithOnHeapCacheThrowingExceptionOnPut() throws Ex mockDiskCacheFactory, cacheConfig, null, + null, removalListener, 1, onHeapCacheSize * keyValueSize, @@ -1202,6 +1207,7 @@ public void testComputeIfAbsentWithDiskCacheThrowingExceptionOnPut() throws Exce mockDiskCacheFactory, cacheConfig, null, + null, removalListener, 1, onHeapCacheSize * keyValueSize, @@ -1356,14 +1362,13 @@ public void testConcurrencyForEvictionFlowFromOnHeapToDiskTier() throws Exceptio } public void testDiskTierPolicies() throws Exception { - // For policy function, allow if what it receives starts with "a" and string is even length - ArrayList> policies = new ArrayList<>(); - policies.add(new AllowFirstLetterA()); - policies.add(new AllowEvenLengths()); + // For disk policy function, allow if what it receives starts with "a" and string is even length + Tuple>, Map>> setupTuple = setupPoliciesTest(); + List> diskPolicies = setupTuple.v1(); int keyValueSize = 50; MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); - TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( + TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( keyValueSize, keyValueSize * 100, removalListener, @@ -1376,35 +1381,22 @@ public void testDiskTierPolicies() throws Exception { ) .build(), 0, - policies, + diskPolicies, 1 ); - Map keyValuePairs = new HashMap<>(); - Map expectedOutputs = new HashMap<>(); - keyValuePairs.put("key1", "abcd"); - expectedOutputs.put("key1", true); - keyValuePairs.put("key2", "abcde"); - expectedOutputs.put("key2", false); - keyValuePairs.put("key3", "bbc"); - expectedOutputs.put("key3", false); - keyValuePairs.put("key4", "ab"); - expectedOutputs.put("key4", true); - keyValuePairs.put("key5", ""); - expectedOutputs.put("key5", false); - + Map> keyValuePairs = setupTuple.v2(); LoadAwareCacheLoader, String> loader = getLoadAwareCacheLoader(keyValuePairs); - int expectedEvictions = 0; for (String key : keyValuePairs.keySet()) { ICacheKey iCacheKey = getICacheKey(key); - Boolean 
expectedOutput = expectedOutputs.get(key); + Boolean expectedOutput = keyValuePairs.get(key).v2(); String value = tieredSpilloverCache.computeIfAbsent(iCacheKey, loader); - assertEquals(keyValuePairs.get(key), value); + assertEquals(keyValuePairs.get(key).v1(), value); String result = tieredSpilloverCache.get(iCacheKey); if (expectedOutput) { // Should retrieve from disk tier if it was accepted - assertEquals(keyValuePairs.get(key), result); + assertEquals(keyValuePairs.get(key).v1(), result); } else { // Should miss as heap tier size = 0 and the policy rejected it assertNull(result); @@ -1419,19 +1411,70 @@ public void testDiskTierPolicies() throws Exception { assertEquals(expectedEvictions, getTotalStatsSnapshot(tieredSpilloverCache).getEvictions()); } - public void testTookTimePolicyFromFactory() throws Exception { + private Tuple>, Map>> setupPoliciesTest() { + ArrayList> policies = new ArrayList<>(); + policies.add(new AllowFirstLetterA()); + policies.add(new AllowEvenLengths()); + + // Map from key to tuple of (value, whether we expect it to be admitted by policy) + Map> keyValuePairs = new HashMap<>(); + keyValuePairs.put("key1", new Tuple<>("abcd", true)); + keyValuePairs.put("key2", new Tuple<>("abcde", false)); + keyValuePairs.put("key3", new Tuple<>("bbc", false)); + keyValuePairs.put("key4", new Tuple<>("ab", true)); + keyValuePairs.put("key5", new Tuple<>("", false)); + return new Tuple<>(policies, keyValuePairs); + } + + public void testTookTimePoliciesFromFactory() throws Exception { // Mock took time by passing this map to the policy info wrapper fn // The policy inspects values, not keys, so this is a map from values -> took time + + long cacheThresholdNanos = 2_000_000L; + long diskThresholdNanos = 11_000_000L; Map tookTimeMap = new HashMap<>(); - tookTimeMap.put("a", 10_000_000L); + tookTimeMap.put("a", diskThresholdNanos); tookTimeMap.put("b", 0L); - tookTimeMap.put("c", 99_999_999L); + tookTimeMap.put("c", diskThresholdNanos * 3); tookTimeMap.put("d", null); tookTimeMap.put("e", -1L); - tookTimeMap.put("f", 8_888_888L); - long timeValueThresholdNanos = 10_000_000L; - - Map keyValueMap = Map.of("A", "a", "B", "b", "C", "c", "D", "d", "E", "e", "F", "f"); + tookTimeMap.put("f", cacheThresholdNanos * 2); + tookTimeMap.put("g", cacheThresholdNanos - 1); + assertTrue(cacheThresholdNanos * 2 < diskThresholdNanos); + + Map keyValueMap = Map.of("A", "a", "B", "b", "C", "c", "D", "d", "E", "e", "F", "f", "G", "g"); + Map expectedInHeapTierMap = Map.of( + "A", + true, + "B", + false, + "C", + true, + "D", + false, + "E", + false, + "F", + true, + "G", + false + ); + Map expectedInDiskTierMap = Map.of( + "A", + true, + "B", + false, + "C", + true, + "D", + false, + "E", + false, + "F", + false, + "G", + false + ); // Most of setup duplicated from testComputeIfAbsentWithFactoryBasedCacheCreation() int onHeapCacheSize = randomIntBetween(tookTimeMap.size() + 1, tookTimeMap.size() + 30); @@ -1460,10 +1503,9 @@ public void testTookTimePolicyFromFactory() throws Exception { ).getKey(), onHeapCacheSize * keyValueSize + "b" ) - .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(timeValueThresholdNanos / 1_000_000) - ) + // Initialize the settings to some other value, so we can demonstrate the updating logic works correctly. 
+ .put(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), TimeValue.ZERO) + .put(TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), TimeValue.ZERO) .put(TIERED_SPILLOVER_SEGMENTS.getConcreteSettingForNamespace(CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()).getKey(), 1) .build(); @@ -1497,28 +1539,57 @@ public CachedQueryResult.PolicyValues apply(String s) { TieredSpilloverCache tieredSpilloverCache = (TieredSpilloverCache) tieredSpilloverICache; - // First add all our values to the on heap cache - for (String key : tookTimeMap.keySet()) { - tieredSpilloverCache.computeIfAbsent(getICacheKey(key), getLoadAwareCacheLoader(keyValueMap)); + // Change setting values to the target values to show both updates work as expected. + clusterSettings.applySettings( + Settings.builder() + .put( + TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(cacheThresholdNanos / 1_000_000) + ) + .put( + TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(diskThresholdNanos / 1_000_000) + ) + .build() + ); + + Map> loaderMap = new HashMap<>(); + for (String key : keyValueMap.keySet()) { + // The boolean here is not needed, just to fit with the get loader method + loaderMap.put(key, new Tuple<>(keyValueMap.get(key), false)); + } + LoadAwareCacheLoader, String> loader = getLoadAwareCacheLoader(loaderMap); + // First check whether keys respect the heap tier threshold. + int expectedKeys = 0; + for (String key : keyValueMap.keySet()) { + tieredSpilloverCache.computeIfAbsent(getICacheKey(key), loader); + if (expectedInHeapTierMap.get(key)) { + expectedKeys++; + } } - assertEquals(tookTimeMap.size(), tieredSpilloverCache.count()); + assertEquals(0, removalListener.evictionsMetric.count()); + assertEquals(0, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(expectedKeys, tieredSpilloverCache.count()); - // Ensure all these keys get evicted from the on heap tier by adding > heap tier size worth of random keys (this works as we have 1 - // segment) + // Ensure all these keys get evicted from the on heap tier by adding > heap tier size worth of random keys + // (this works as we have 1 segment). 
Set heap threshold to 0 to ensure random keys can all enter + clusterSettings.applySettings( + Settings.builder() + .put(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), TimeValue.ZERO) + .build() + ); for (int i = 0; i < onHeapCacheSize; i++) { - tieredSpilloverCache.computeIfAbsent(getICacheKey(UUID.randomUUID().toString()), getLoadAwareCacheLoader(keyValueMap)); + tieredSpilloverCache.computeIfAbsent(getICacheKey(UUID.randomUUID().toString()), getLoadAwareCacheLoader()); } - for (String key : tookTimeMap.keySet()) { + for (String key : keyValueMap.keySet()) { ICacheKey iCacheKey = getICacheKey(key); assertNull(tieredSpilloverCache.getTieredCacheSegment(iCacheKey).getOnHeapCache().get(iCacheKey)); } // Now the original keys should be in the disk tier if the policy allows them, or misses if not - for (String key : tookTimeMap.keySet()) { + for (String key : keyValueMap.keySet()) { String computedValue = tieredSpilloverCache.get(getICacheKey(key)); - String mapValue = keyValueMap.get(key); - Long tookTime = tookTimeMap.get(mapValue); - if (tookTime != null && tookTime > timeValueThresholdNanos) { + if (expectedInDiskTierMap.get(key)) { // expect a hit assertNotNull(computedValue); } else { @@ -1543,6 +1614,139 @@ public void testMinimumThresholdSettingValue() throws Exception { assertEquals(validDuration, concreteSetting.get(validSettings)); } + public void testEntryPoliciesWithPut() throws Exception { + Tuple>, Map>> setupTuple = setupPoliciesTest(); + List> policies = setupTuple.v1(); + Map> keyValuePairs = setupTuple.v2(); + + int keyValueSize = 50; + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( + keyValueSize, + keyValueSize * 100, + removalListener, + Settings.builder() + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + keyValueSize * keyValuePairs.size() + 1 + "b" + ) + .build(), + 0, + policies, + null, + 1 + ); + + int expectedKeys = 0; + for (String key : keyValuePairs.keySet()) { + ICacheKey iCacheKey = getICacheKey(key); + tieredSpilloverCache.put(iCacheKey, keyValuePairs.get(key).v1()); + Boolean expectedOutput = keyValuePairs.get(key).v2(); + String result = tieredSpilloverCache.get(iCacheKey); + if (expectedOutput) { + // Should retrieve from heap tier if it was accepted + assertEquals(keyValuePairs.get(key).v1(), result); + expectedKeys++; + } else { + // Should miss as the policy rejected it + assertNull(result); + } + } + + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(expectedKeys, getTotalStatsSnapshot(tieredSpilloverCache).getItems()); + assertEquals(0, removalListener.evictionsMetric.count()); + } + + public void testEntryPoliciesConcurrentlyWithComputeIfAbsent() throws Exception { + Tuple>, Map>> setupTuple = setupPoliciesTest(); + List> policies = setupTuple.v1(); + Map> keyValuePairs = setupTuple.v2(); + + int keyValueSize = 50; + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + LoadAwareCacheLoader, String> loader = getLoadAwareCacheLoader(keyValuePairs); + TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( + keyValueSize, + keyValueSize * 100, + removalListener, + Settings.builder() + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE.getConcreteSettingForNamespace( 
+ CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + keyValueSize * keyValuePairs.size() + 1 + "b" + ) + .build(), + 0, + policies, + null, + 1 + ); + + // To test concurrently, run for each key multiple times in parallel threads + int numRepetitionsPerKey = 10; + int numThreads = keyValuePairs.size() * numRepetitionsPerKey; + + Thread[] threads = new Thread[numThreads]; + Phaser phaser = new Phaser(numThreads + 1); + CountDownLatch countDownLatch = new CountDownLatch(numThreads); + + // Get number of keys we expect to enter the cache + int expectedKeys = 0; + for (String key : keyValuePairs.keySet()) { + Boolean expectedOutput = keyValuePairs.get(key).v2(); + if (expectedOutput) { + expectedKeys++; + } + } + + int threadNumber = 0; + for (String key : keyValuePairs.keySet()) { + for (int j = 0; j < numRepetitionsPerKey; j++) { + threads[threadNumber] = new Thread(() -> { + try { + phaser.arriveAndAwaitAdvance(); + ICacheKey iCacheKey = getICacheKey(key); + tieredSpilloverCache.computeIfAbsent(iCacheKey, loader); + } catch (Exception ignored) {} finally { + countDownLatch.countDown(); + } + }); + threads[threadNumber].start(); + threadNumber++; + } + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(expectedKeys, getItemsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + // We should have (numRepetitionsPerKey - 1) * (expectedKeys) hits + assertEquals((numRepetitionsPerKey - 1) * expectedKeys, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + // We should have 1 miss for each accepted key. Rejected keys should not cause misses. + assertEquals(expectedKeys, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + + for (String key : keyValuePairs.keySet()) { + ICacheKey iCacheKey = getICacheKey(key); + String result = tieredSpilloverCache.get(iCacheKey); + Boolean expectedInCache = keyValuePairs.get(key).v2(); + if (expectedInCache) { + // Should retrieve from heap tier if it was accepted + assertEquals(keyValuePairs.get(key).v1(), result); + } else { + // Should miss as the policy rejected it + assertNull(result); + } + } + + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(expectedKeys, getTotalStatsSnapshot(tieredSpilloverCache).getItems()); + assertEquals(0, removalListener.evictionsMetric.count()); + } + public void testPutWithDiskCacheDisabledSetting() throws Exception { int onHeapCacheSize = randomIntBetween(10, 30); int diskCacheSize = randomIntBetween(300, 500); @@ -1972,8 +2176,8 @@ public void testWithInvalidSegmentNumber() throws Exception { .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) - .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken - // 20_000_000 ns = 20 ms to compute + // Values will always appear to have taken 2x the took time threshold to compute, so they will be admitted + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(DEFAULT_TOOK_TIME_DISK_THRESHOLD.getNanos() * 2)) .setClusterSettings(clusterSettings) .setStoragePath(storagePath) .build(), @@ -2037,8 +2241,8 @@ public void testWithVeryLowDiskCacheSize() throws Exception { .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) - .setCachedResultParser(s -> new 
CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken - // 20_000_000 ns = 20 ms to compute + // Values will always appear to have taken 2x the took time threshold to compute, so they will be admitted + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(DEFAULT_TOOK_TIME_DISK_THRESHOLD.getNanos() * 2)) .setClusterSettings(clusterSettings) .setStoragePath(storagePath) .build(), @@ -2096,8 +2300,8 @@ public void testTieredCacheDefaultSegmentCount() { .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) - .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken - // 20_000_000 ns = 20 ms to compute + // Values will always appear to have taken 2x the took time threshold to compute, so they will be admitted + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(DEFAULT_TOOK_TIME_DISK_THRESHOLD.getNanos() * 2)) .setClusterSettings(clusterSettings) .setStoragePath(storagePath) .build(), @@ -2234,8 +2438,8 @@ public void testSegmentSizesWhenUsingFactory() { .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) - .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken - // 20_000_000 ns = 20 ms to compute + // Values will always appear to have taken 2x the took time threshold to compute, so they will be admitted + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(DEFAULT_TOOK_TIME_DISK_THRESHOLD.getNanos() * 2)) .setClusterSettings(clusterSettings) .setStoragePath(storagePath) .build(), @@ -2281,6 +2485,7 @@ public void testSegmentSizesWhenNotUsingFactory() { new MockDiskCache.MockDiskCacheFactory(0, diskSizeFromImplSetting, true, keyValueSize), cacheConfig, null, + null, removalListener, numSegments, expectedHeapSize, @@ -2351,14 +2556,14 @@ public boolean isLoaded() { }; } - private LoadAwareCacheLoader, String> getLoadAwareCacheLoader(Map keyValueMap) { + private LoadAwareCacheLoader, String> getLoadAwareCacheLoader(Map> keyValueMap) { return new LoadAwareCacheLoader<>() { boolean isLoaded = false; @Override public String load(ICacheKey key) { isLoaded = true; - String mapValue = keyValueMap.get(key.key); + String mapValue = keyValueMap.get(key.key).v1(); if (mapValue == null) { mapValue = UUID.randomUUID().toString(); } @@ -2377,6 +2582,7 @@ private TieredSpilloverCache getTieredSpilloverCache( ICache.Factory mockDiskCacheFactory, CacheConfig cacheConfig, List> policies, + List> diskPolicies, RemovalListener, String> removalListener, int numberOfSegments, long onHeapCacheSizeInBytes, @@ -2393,7 +2599,14 @@ private TieredSpilloverCache getTieredSpilloverCache( .setOnHeapCacheSizeInBytes(onHeapCacheSizeInBytes) .setCacheConfig(cacheConfig); if (policies != null) { - builder.addPolicies(policies); + for (Predicate policy : policies) { + builder.addPolicy(policy); + } + } + if (diskPolicies != null) { + for (Predicate diskPolicy : diskPolicies) { + builder.addDiskPolicy(diskPolicy); + } } return builder.build(); } @@ -2406,7 +2619,7 @@ private TieredSpilloverCache initializeTieredSpilloverCache( long diskDeliberateDelay ) { - return intializeTieredSpilloverCache(keyValueSize, diskCacheSize, removalListener, settings, diskDeliberateDelay, null, 256); + return initializeTieredSpilloverCache(keyValueSize, diskCacheSize, removalListener, settings, diskDeliberateDelay, null, 256); } private TieredSpilloverCache 
initializeTieredSpilloverCache( @@ -2418,7 +2631,7 @@ private TieredSpilloverCache initializeTieredSpilloverCache( int numberOfSegments ) { - return intializeTieredSpilloverCache( + return initializeTieredSpilloverCache( keyValueSize, diskCacheSize, removalListener, @@ -2429,13 +2642,35 @@ private TieredSpilloverCache initializeTieredSpilloverCache( ); } - private TieredSpilloverCache intializeTieredSpilloverCache( + private TieredSpilloverCache initializeTieredSpilloverCache( + int keyValueSize, + int diskCacheSize, + RemovalListener, String> removalListener, + Settings settings, + long diskDeliberateDelay, + List> diskPolicies, + int numberOfSegments + ) { + return initializeTieredSpilloverCache( + keyValueSize, + diskCacheSize, + removalListener, + settings, + diskDeliberateDelay, + new ArrayList>(), + diskPolicies, + numberOfSegments + ); + } + + private TieredSpilloverCache initializeTieredSpilloverCache( int keyValueSize, int diskCacheSize, RemovalListener, String> removalListener, Settings settings, long diskDeliberateDelay, List> policies, + List> diskPolicies, int numberOfSegments ) { ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(); @@ -2481,6 +2716,7 @@ private TieredSpilloverCache intializeTieredSpilloverCache( mockDiskCacheFactory, cacheConfig, policies, + diskPolicies, removalListener, numberOfSegments, onHeapCacheSizeInBytes, From bd6e2a7e99236645aaddae6ff4858265bc0e0a99 Mon Sep 17 00:00:00 2001 From: Rishabh Singh Date: Thu, 27 Feb 2025 14:58:21 -0800 Subject: [PATCH 30/48] Refresh benchmark configs to use 3.0.0-alpha1 version (#17476) Signed-off-by: Rishabh Singh --- .github/benchmark-configs.json | 121 +++---------------- .github/workflows/benchmark-pull-request.yml | 2 +- 2 files changed, 19 insertions(+), 104 deletions(-) diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json index 1c80f5048a611..17644c067ac98 100644 --- a/.github/benchmark-configs.json +++ b/.github/benchmark-configs.json @@ -2,7 +2,7 @@ "name": "Cluster and opensearch-benchmark configurations", "id_1": { "description": "Indexing only configuration for NYC_TAXIS workload", - "supported_major_versions": ["2", "3"], + "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", @@ -19,7 +19,7 @@ }, "id_2": { "description": "Indexing only configuration for HTTP_LOGS workload", - "supported_major_versions": ["2", "3"], + "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", @@ -41,7 +41,7 @@ "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "nyc_taxis", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"nyc_taxis_1_shard\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"nyc_taxis_1_shard\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -52,81 +52,13 @@ "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_4": { - "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-3.0.0", - 
"supported_major_versions": ["3"], - "cluster-benchmark-configs": { - "SINGLE_NODE_CLUSTER": "true", - "MIN_DISTRIBUTION": "true", - "TEST_WORKLOAD": "http_logs", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"http_logs_1_shard\"}", - "CAPTURE_NODE_STAT": "true", - "TEST_PROCEDURE": "restore-from-snapshot" - }, - "cluster_configuration": { - "size": "Single-Node", - "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - }, - "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" - }, - "id_5": { "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-3.0.0", "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", - "CAPTURE_NODE_STAT": "true", - "TEST_PROCEDURE": "restore-from-snapshot" - }, - "cluster_configuration": { - "size": "Single-Node", - "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - }, - "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" - }, - "id_6": { - "description": "Search only test-procedure for NYC_TAXIS, uses snapshot to restore the data for OS-2.x", - "supported_major_versions": ["2"], - "cluster-benchmark-configs": { - "SINGLE_NODE_CLUSTER": "true", - "MIN_DISTRIBUTION": "true", - "TEST_WORKLOAD": "nyc_taxis", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"nyc_taxis_1_shard\"}", - "CAPTURE_NODE_STAT": "true", - "TEST_PROCEDURE": "restore-from-snapshot" - }, - "cluster_configuration": { - "size": "Single-Node", - "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - }, - "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" - }, - "id_7": { - "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-2.x", - "supported_major_versions": ["2"], - "cluster-benchmark-configs": { - "SINGLE_NODE_CLUSTER": "true", - "MIN_DISTRIBUTION": "true", - "TEST_WORKLOAD": "http_logs", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"http_logs_1_shard\"}", - "CAPTURE_NODE_STAT": "true", - "TEST_PROCEDURE": "restore-from-snapshot" - }, - "cluster_configuration": { - "size": "Single-Node", - "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - }, - "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" - }, - "id_8": { - "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-2.x", - "supported_major_versions": ["2"], - "cluster-benchmark-configs": { - "SINGLE_NODE_CLUSTER": "true", - "MIN_DISTRIBUTION": "true", - "TEST_WORKLOAD": "big5", - "WORKLOAD_PARAMS": 
"{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"big5_1_shard_ordered\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -136,9 +68,9 @@ }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, - "id_9": { + "id_5": { "description": "Indexing and search configuration for pmc workload", - "supported_major_versions": ["2", "3"], + "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", @@ -152,9 +84,9 @@ }, "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" }, - "id_10": { + "id_6": { "description": "Indexing only configuration for stack-overflow workload", - "supported_major_versions": ["2", "3"], + "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", @@ -168,7 +100,7 @@ }, "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" }, - "id_11": { + "id_7": { "description": "Search only test-procedure for big5 with concurrent segment search setting enabled", "supported_major_versions": ["3"], "cluster-benchmark-configs": { @@ -176,7 +108,7 @@ "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", "ADDITIONAL_CONFIG": "search.concurrent_segment_search.enabled:true", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -186,7 +118,7 @@ }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, - "id_12": { + "id_8": { "description": "Search only test-procedure for big5 with concurrent segment search mode as all", "supported_major_versions": ["3"], "cluster-benchmark-configs": { @@ -194,7 +126,7 @@ "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", "ADDITIONAL_CONFIG": "search.concurrent_segment_search.mode:all", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -204,7 +136,7 @@ }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, - "id_13": { + "id_9": { 
"description": "Search only test-procedure for big5 with concurrent segment search mode as auto", "supported_major_versions": ["3"], "cluster-benchmark-configs": { @@ -212,7 +144,7 @@ "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", "ADDITIONAL_CONFIG": "search.concurrent_segment_search.mode:auto", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -222,7 +154,7 @@ }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, - "id_14": { + "id_10": { "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-3.0.0. Enables range query approximation.", "supported_major_versions": ["3"], "cluster-benchmark-configs": { @@ -230,23 +162,6 @@ "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", "ADDITIONAL_CONFIG": "opensearch.experimental.feature.approximate_point_range_query.enabled:true", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", - "CAPTURE_NODE_STAT": "true", - "TEST_PROCEDURE": "restore-from-snapshot" - }, - "cluster_configuration": { - "size": "Single-Node", - "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - }, - "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" - }, - "id_15": { - "description": "Search only test-procedure for big5, uses lucene-10 index snapshot to restore the data for OS-3.0.0", - "supported_major_versions": ["3"], - "cluster-benchmark-configs": { - "SINGLE_NODE_CLUSTER": "true", - "MIN_DISTRIBUTION": "true", - "TEST_WORKLOAD": "big5", "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" @@ -257,9 +172,9 @@ }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, - "id_16": { + "id_11": { "description": "Benchmarking config for NESTED workload, benchmarks nested queries with inner-hits", - "supported_major_versions": ["2", "3"], + "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", @@ -273,4 +188,4 @@ }, "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" } - } +} \ No newline at end of file diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index e6ccc31160bf9..38e12f97d4480 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -22,7 +22,7 @@ jobs: echo "PR_NUMBER=${{ github.event.issue.number }}" >> $GITHUB_ENV echo "REPOSITORY=${{ github.event.repository.full_name }}" >> 
$GITHUB_ENV OPENSEARCH_VERSION=$(awk -F '=' '/^opensearch[[:space:]]*=/ {gsub(/[[:space:]]/, "", $2); print $2}' buildSrc/version.properties) - echo "OPENSEARCH_VERSION=$OPENSEARCH_VERSION" >> $GITHUB_ENV + echo "OPENSEARCH_VERSION=$OPENSEARCH_VERSION-alpha1" >> $GITHUB_ENV major_version=$(echo $OPENSEARCH_VERSION | cut -d'.' -f1) echo "OPENSEARCH_MAJOR_VERSION=$major_version" >> $GITHUB_ENV echo "USER_TAGS=pull_request_number:${{ github.event.issue.number }},repository:OpenSearch" >> $GITHUB_ENV From 0dde4da59e50835701f03b21022633aae959ef99 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 27 Feb 2025 18:55:15 -0500 Subject: [PATCH 31/48] Add 2.19.1 release notes (#17468) (#17477) Signed-off-by: Andriy Redko (cherry picked from commit 2e4741fb45d1b150aaeeadf66d41445b23ff5982) --- CHANGELOG.md | 2 -- release-notes/opensearch.release-notes-2.19.1.md | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 release-notes/opensearch.release-notes-2.19.1.md diff --git a/CHANGELOG.md b/CHANGELOG.md index bd218393919bd..45fd4813e72da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17229](https://github.com/opensearch-project/OpenSearch/pull/17229)) - Bump `org.jruby.joni:joni` from 2.2.1 to 2.2.3 ([#17136](https://github.com/opensearch-project/OpenSearch/pull/17136)) - Bump `org.apache.ant:ant` from 1.10.14 to 1.10.15 ([#17288](https://github.com/opensearch-project/OpenSearch/pull/17288)) -- Bump netty from 4.1.117.Final to 4.1.118.Final ([#17320](https://github.com/opensearch-project/OpenSearch/pull/17320)) - Bump `reactor_netty` from 1.1.26 to 1.1.27 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322)) - Bump `me.champeau.gradle.japicmp` from 0.4.5 to 0.4.6 ([#17375](https://github.com/opensearch-project/OpenSearch/pull/17375)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.37.1 to 2.52.0 ([#17379](https://github.com/opensearch-project/OpenSearch/pull/17379)) @@ -41,7 +40,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) - Add highlighting for wildcard search on `match_only_text` field ([#17101](https://github.com/opensearch-project/OpenSearch/pull/17101)) - Fix illegal argument exception when creating a PIT ([#16781](https://github.com/opensearch-project/OpenSearch/pull/16781)) -- Fix HTTP API calls that hang with 'Accept-Encoding: zstd' ([#17408](https://github.com/opensearch-project/OpenSearch/pull/17408)) ### Security diff --git a/release-notes/opensearch.release-notes-2.19.1.md b/release-notes/opensearch.release-notes-2.19.1.md new file mode 100644 index 0000000000000..81eccde2b1c30 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.19.1.md @@ -0,0 +1,16 @@ +## 2025-02-27 Version 2.19.1 Release Notes + +## [2.19.1] +### Added +- Add execution_hint to cardinality aggregator request (#[17420](https://github.com/opensearch-project/OpenSearch/pull/17420)) + +### Dependencies +- Bump netty from 4.1.117.Final to 4.1.118.Final ([#17320](https://github.com/opensearch-project/OpenSearch/pull/17320)) +- Bump `jetty` version from 9.4.55.v20240627 to 9.4.57.v20241219 + +### Changed + +### Deprecated + +### Fixed +- Fix HTTP API calls that hang with 'Accept-Encoding: zstd' 
([#17408](https://github.com/opensearch-project/OpenSearch/pull/17408)) From ceddbe07a3c1f2fe28c3548695528fd00497601b Mon Sep 17 00:00:00 2001 From: Rishabh Singh Date: Thu, 27 Feb 2025 15:58:54 -0800 Subject: [PATCH 32/48] add alpha1 qualifier to assemble command in benchmark workflow (#17481) Signed-off-by: Rishabh Singh --- .github/workflows/benchmark-pull-request.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index 38e12f97d4480..850a3310cbf6c 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -147,7 +147,7 @@ jobs: distribution: 'temurin' - name: Build and Assemble OpenSearch from PR run: | - ./gradlew :distribution:archives:linux-tar:assemble -Dbuild.snapshot=false + ./gradlew :distribution:archives:linux-tar:assemble -Dbuild.snapshot=false -Dbuild.version_qualifier=alpha1 - name: Configure AWS credentials uses: aws-actions/configure-aws-credentials@v4 with: From a961ec728859b5318a8c7f80206ff6566a954971 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 27 Feb 2025 19:13:38 -0800 Subject: [PATCH 33/48] Add @bugmakerrrrrr as maintainerrrrrr (#17466) Following the [nomination process][1], I have nominated and other maintainers have agreed to add Pan Guixin (@bugmakerrrrrr) as a co-Maintainer of the OpenSearch repository. Pan Guixin has kindly accepted the invitation. [1]: https://github.com/opensearch-project/.github/blob/main/RESPONSIBILITIES.md#becoming-a-maintainer Signed-off-by: Andrew Ross --- .github/CODEOWNERS | 2 +- MAINTAINERS.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 38ce0c3a3f927..5915365677ca2 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -11,7 +11,7 @@ # 3. Use the command palette to run the CODEOWNERS: Show owners of current file command, which will display all code owners for the current file. # Default ownership for all repo files -* @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jainankitk @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +* @anasalkouz @andrross @ashking94 @bugmakerrrrrr @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jainankitk @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /modules/lang-painless/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /modules/parent-join/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 887ff654dff96..7906596f047d5 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -25,6 +25,7 @@ This document contains a list of maintainers in this repo. 
See [opensearch-proje | Michael Froh | [msfroh](https://github.com/msfroh) | Amazon | | Nick Knize | [nknize](https://github.com/nknize) | Lucenia | | Owais Kazi | [owaiskazi19](https://github.com/owaiskazi19) | Amazon | +| Pan Guixin | [bugmakerrrrrr](https://github.com/bugmakerrrrrr) | ByteDance | | Peter Nied | [peternied](https://github.com/peternied) | Amazon | | Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon | | Sachin Kale | [sachinpkale](https://github.com/sachinpkale) | Amazon | From 968eafbd37ef0b864e887643c74291cc3e5ca0d0 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Fri, 28 Feb 2025 14:34:55 -0500 Subject: [PATCH 34/48] Update version to 2_19_1 for serialization of execution hint in CardinalityAggregationBuilder (#17492) Signed-off-by: Craig Perkins --- .../aggregations/metrics/CardinalityAggregationBuilder.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index f77bbfbd48461..202a6babafec7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -116,7 +116,7 @@ public CardinalityAggregationBuilder(StreamInput in) throws IOException { if (in.readBoolean()) { precisionThreshold = in.readLong(); } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_19_1)) { executionHint = in.readOptionalString(); } } @@ -133,7 +133,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { if (hasPrecisionThreshold) { out.writeLong(precisionThreshold); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_19_1)) { out.writeOptionalString(executionHint); } } From bfdf019ab3c9def800b015b95c1bd5aa3a5232a1 Mon Sep 17 00:00:00 2001 From: Wenqi Gao Date: Mon, 3 Mar 2025 12:13:04 -0800 Subject: [PATCH 35/48] Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder. (#17409) (#17409) * The filter function will combine a filter with the query builder. If the query builder itself has a filter, we will combine the filter and return the query builder itself. If not, we will use a bool query builder to combine the query builder and the filter and then return the bool query builder.
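As a concrete sketch of the described behavior (the field names and values below are hypothetical, for illustration only, and are not part of the change):

  QueryBuilder term = QueryBuilders.termQuery("status", "active");
  QueryBuilder timeFilter = QueryBuilders.rangeQuery("timestamp").gte("now-1d");
  // A plain query builder is wrapped in a bool query: must(term) plus filter(timeFilter).
  QueryBuilder combined = term.filter(timeFilter);
  // A BoolQueryBuilder adds the filter to its own filter clauses and returns itself.
  BoolQueryBuilder boolQuery = QueryBuilders.boolQuery().must(term);
  assert boolQuery.filter(timeFilter) == boolQuery;
  // A null filter is a no-op: the original builder is returned unchanged.
  assert term.filter(null) == term;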
Signed-off-by: Chloe Gao --- CHANGELOG-3.0.md | 1 + .../index/query/AbstractQueryBuilder.java | 24 +++++++++++++++++++ .../index/query/BoolQueryBuilder.java | 12 ++++++---- .../query/ConstantScoreQueryBuilder.java | 16 +++++++++++++ .../opensearch/index/query/QueryBuilder.java | 12 ++++++++++ .../index/query/SpanNearQueryBuilder.java | 5 ++++ .../index/query/BoolQueryBuilderTests.java | 18 +++++++++++++- .../query/ConstantScoreQueryBuilderTests.java | 17 +++++++++++++ .../query/SpanMultiTermQueryBuilderTests.java | 5 ++++ .../test/AbstractQueryTestCase.java | 19 +++++++++++++++ 10 files changed, 123 insertions(+), 6 deletions(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 4c366d0c7714f..1a0f9280136c4 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) - Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) - Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) +- Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder([#17409](https://github.com/opensearch-project/OpenSearch/pull/17409)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java index 66c6ee115c3f0..cd133798faa6d 100644 --- a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java @@ -86,6 +86,30 @@ protected AbstractQueryBuilder(StreamInput in) throws IOException { queryName = in.readOptionalString(); } + /** + * Check the input parameters of the filter function. + * @param filter filter to combine with current query builder + * @return true if parameters are valid. Returns false when the filter is null. + */ + public static boolean validateFilterParams(QueryBuilder filter) { + return filter != null; + } + + /** + * Combine filter with current query builder + * @param filter filter to combine with current query builder + * @return query builder with filter combined + */ + public QueryBuilder filter(QueryBuilder filter) { + if (validateFilterParams(filter) == false) { + return this; + } + final BoolQueryBuilder modifiedQB = new BoolQueryBuilder(); + modifiedQB.must(this); + modifiedQB.filter(filter); + return modifiedQB; + } + @Override public final void writeTo(StreamOutput out) throws IOException { out.writeFloat(boost); diff --git a/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java index c44a7ef6a397c..58009f055650b 100644 --- a/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java @@ -135,13 +135,15 @@ public List must() { /** * Adds a query that must appear in the matching documents but will - * not contribute to scoring. No {@code null} value allowed. + * not contribute to scoring. If a null value is passed, then do nothing and return.
+ * @param filter the filter to add to the current bool query + * @return query builder with filter combined */ - public BoolQueryBuilder filter(QueryBuilder queryBuilder) { - if (queryBuilder == null) { - throw new IllegalArgumentException("inner bool query clause cannot be null"); + public BoolQueryBuilder filter(QueryBuilder filter) { + if (validateFilterParams(filter) == false) { + return this; } - filterClauses.add(queryBuilder); + filterClauses.add(filter); return this; } diff --git a/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java index b2764d29da80a..b74224cd5ef22 100644 --- a/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java @@ -101,6 +101,22 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep builder.endObject(); } + /** + * Adds a filter to the current ConstantScoreQuery. + * @param filter the filter to add to the current ConstantScoreQuery + * @return query builder with filter combined + */ + public ConstantScoreQueryBuilder filter(QueryBuilder filter) { + if (validateFilterParams(filter) == false) { + return this; + } + QueryBuilder filteredFilterBuilder = filterBuilder.filter(filter); + if (filteredFilterBuilder != filterBuilder) { + return new ConstantScoreQueryBuilder(filteredFilterBuilder); + } + return this; + } + public static ConstantScoreQueryBuilder fromXContent(XContentParser parser) throws IOException { QueryBuilder query = null; boolean queryFound = false; diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java index 0cdf7f31c2ebf..f52b393202d28 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java @@ -47,6 +47,18 @@ @PublicApi(since = "1.0.0") public interface QueryBuilder extends NamedWriteable, ToXContentObject, Rewriteable { + /** + * This function combines a filter with a query builder. If the query builder itself has + * a filter we will combine the filter and return the query builder itself. + * If not we will use a bool query builder to combine the query builder and + * the filter and then return the bool query builder. + * If the filter is null we simply return the query builder without any operation. + * + * @param filter The filter to be added to the existing query. + * @return A QueryBuilder with the filter added to the existing query. + */ + QueryBuilder filter(QueryBuilder filter); + /** * Converts this QueryBuilder to a lucene {@link Query}.
* Returns {@code null} if this query should be ignored in the context of diff --git a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java index 179673f500a92..2912a5cb09276 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java @@ -375,6 +375,11 @@ public Query toQuery(QueryShardContext context) throws IOException { throw new UnsupportedOperationException(); } + @Override + public QueryBuilder filter(QueryBuilder filter) { + throw new UnsupportedOperationException("You can't add a filter to a SpanGapQueryBuilder"); + } + @Override public String queryName() { throw new UnsupportedOperationException(); diff --git a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java index a23dff39c6496..f3de666c52932 100644 --- a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java @@ -178,7 +178,6 @@ public void testIllegalArguments() { BoolQueryBuilder booleanQuery = new BoolQueryBuilder(); expectThrows(IllegalArgumentException.class, () -> booleanQuery.must(null)); expectThrows(IllegalArgumentException.class, () -> booleanQuery.mustNot(null)); - expectThrows(IllegalArgumentException.class, () -> booleanQuery.filter(null)); expectThrows(IllegalArgumentException.class, () -> booleanQuery.should(null)); } @@ -326,6 +325,23 @@ public void testFilterNull() throws IOException { assertTrue(builder.filter().isEmpty()); } + /** + * Check if a filter can be applied to the BoolQuery + * @throws IOException + */ + public void testFilter() throws IOException { + // Test for non null filter + String query = "{\"bool\" : {\"filter\" : null } }"; + QueryBuilder filter = QueryBuilders.matchAllQuery(); + BoolQueryBuilder builder = (BoolQueryBuilder) parseQuery(query); + assertFalse(builder.filter(filter).filter().isEmpty()); + assertEquals(builder.filter(filter).filter().get(0), filter); + + // Test for null filter case + builder = (BoolQueryBuilder) parseQuery(query); + assertTrue(builder.filter(null).filter().isEmpty()); + } + /** * test that unknown query names in the clauses throw an error */ diff --git a/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java index 527413d2513d0..cdc61a7f66e9c 100644 --- a/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java @@ -143,4 +143,21 @@ public void testVisit() { assertEquals(2, visitorQueries.size()); } + + public void testFilter() { + // Test for non null filter + BoolQueryBuilder filterBuilder = new BoolQueryBuilder(); + ConstantScoreQueryBuilder constantScoreQueryBuilder = new ConstantScoreQueryBuilder(filterBuilder); + QueryBuilder filter = QueryBuilders.matchAllQuery(); + constantScoreQueryBuilder.filter(filter); + assertEquals(1, filterBuilder.filter().size()); + assertEquals(filter, filterBuilder.filter().get(0)); + + // Test for null filter + filterBuilder = new BoolQueryBuilder(); + constantScoreQueryBuilder = new ConstantScoreQueryBuilder(filterBuilder); + constantScoreQueryBuilder.filter(null); + assertEquals(0, 
filterBuilder.filter().size()); + + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java index fe8ab7c0765e6..48cd5c0f2f918 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -182,6 +182,11 @@ public void writeTo(StreamOutput out) throws IOException { public String fieldName() { return "foo"; } + + @Override + public QueryBuilder filter(QueryBuilder filter) { + return this; + } } @Override diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java index afd93e1b72fbb..bffde62b193da 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java @@ -63,7 +63,9 @@ import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.AbstractQueryBuilder; +import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.Rewriteable; @@ -868,4 +870,21 @@ public void testCacheability() throws IOException { assertTrue("query should be cacheable: " + queryBuilder.toString(), context.isCacheable()); } + /** + * Check if a filter can be applied to the abstract query builder. + * @throws UnsupportedOperationException + */ + public void testFilter() throws IOException { + QB queryBuilder = createTestQueryBuilder(); + QueryBuilder filter = QueryBuilders.matchAllQuery(); + // Test for Null Filter case + QueryBuilder returnedQuerybuilder = queryBuilder.filter(null); + assertEquals(queryBuilder, returnedQuerybuilder); + + // Test for non null filter + returnedQuerybuilder = queryBuilder.filter(filter); + assertTrue(returnedQuerybuilder instanceof BoolQueryBuilder); + assertTrue(((BoolQueryBuilder) returnedQuerybuilder).filter().size() == 1); + assertEquals(filter, ((BoolQueryBuilder) returnedQuerybuilder).filter().get(0)); + } } From 21f69cae7667d3666a2d09fb6936e1b04b44b015 Mon Sep 17 00:00:00 2001 From: Xu Xiong Date: Mon, 3 Mar 2025 15:48:14 -0800 Subject: [PATCH 36/48] [Pull-based Ingestion] Add basic NodeStats metrics (#17444) Signed-off-by: xuxiong1 --- .../plugin/kafka/IngestFromKafkaIT.java | 6 + .../stats/TransportClusterStatsAction.java | 7 +- .../admin/indices/stats/ShardStats.java | 23 ++- .../stats/TransportIndicesStatsAction.java | 14 +- .../org/opensearch/index/engine/Engine.java | 8 + .../index/engine/IngestionEngine.java | 6 + .../opensearch/index/shard/IndexShard.java | 5 + .../opensearch/indices/IndicesService.java | 7 +- .../pollingingest/DefaultStreamPoller.java | 12 ++ .../MessageProcessorRunnable.java | 7 + .../pollingingest/PollingIngestStats.java | 175 ++++++++++++++++++ .../indices/pollingingest/StreamPoller.java | 2 + .../cluster/node/stats/NodeStatsTests.java | 2 + .../cluster/stats/ClusterStatsNodesTests.java | 1 + .../stats/ClusterStatsResponseTests.java | 1 + .../TransportRolloverActionTests.java | 2 +- .../shards/CatShardsResponseTests.java | 2 +- .../stats/IndicesStatsResponseTests.java | 2 +- 
.../opensearch/cluster/DiskUsageTests.java | 4 +- .../index/shard/IndexShardTests.java | 6 +- .../PollingIngestStatsTests.java | 58 ++++++ .../action/cat/RestShardsActionTests.java | 1 + 22 files changed, 340 insertions(+), 11 deletions(-) create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/PollingIngestStats.java create mode 100644 server/src/test/java/org/opensearch/indices/pollingingest/PollingIngestStatsTests.java diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java index d51569431506a..6fe670d4d5b62 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java @@ -16,6 +16,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.plugins.PluginInfo; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Assert; @@ -75,6 +76,11 @@ public void testKafkaIngestion() { refresh("test"); SearchResponse response = client().prepareSearch("test").setQuery(query).get(); assertThat(response.getHits().getTotalHits().value(), is(1L)); + PollingIngestStats stats = client().admin().indices().prepareStats("test").get().getIndex("test").getShards()[0] + .getPollingIngestStats(); + assertNotNull(stats); + assertThat(stats.getMessageProcessorStats().getTotalProcessedCount(), is(2L)); + assertThat(stats.getConsumerStats().getTotalPolledCount(), is(2L)); }); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index c6581b99eb559..6ea6fe5ea9715 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -56,6 +56,7 @@ import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequest; @@ -210,15 +211,18 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq CommitStats commitStats; SeqNoStats seqNoStats; RetentionLeaseStats retentionLeaseStats; + PollingIngestStats pollingIngestStats; try { commitStats = indexShard.commitStats(); seqNoStats = indexShard.seqNoStats(); retentionLeaseStats = indexShard.getRetentionLeaseStats(); + pollingIngestStats = indexShard.pollingIngestStats(); } catch (final AlreadyClosedException e) { // shard is closed - no stats is fine commitStats = null; seqNoStats = null; retentionLeaseStats = null; + pollingIngestStats = null; } shardsStats.add( new ShardStats( @@ -227,7 +231,8 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq new CommonStats(indicesService.getIndicesQueryCache(), indexShard, commonStatsFlags), commitStats, seqNoStats, - retentionLeaseStats + retentionLeaseStats, + 
pollingIngestStats ) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java index 4ed1ce95b7de2..7c78a903217ab 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.stats; +import org.opensearch.Version; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; @@ -44,6 +45,7 @@ import org.opensearch.index.seqno.RetentionLeaseStats; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.shard.ShardPath; +import org.opensearch.indices.pollingingest.PollingIngestStats; import java.io.IOException; @@ -65,6 +67,9 @@ public class ShardStats implements Writeable, ToXContentFragment { @Nullable private RetentionLeaseStats retentionLeaseStats; + @Nullable + private PollingIngestStats pollingIngestStats; + /** * Gets the current retention lease stats. * @@ -87,6 +92,9 @@ public ShardStats(StreamInput in) throws IOException { isCustomDataPath = in.readBoolean(); seqNoStats = in.readOptionalWriteable(SeqNoStats::new); retentionLeaseStats = in.readOptionalWriteable(RetentionLeaseStats::new); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + pollingIngestStats = in.readOptionalWriteable(PollingIngestStats::new); + } } public ShardStats( @@ -95,7 +103,8 @@ public ShardStats( final CommonStats commonStats, final CommitStats commitStats, final SeqNoStats seqNoStats, - final RetentionLeaseStats retentionLeaseStats + final RetentionLeaseStats retentionLeaseStats, + final PollingIngestStats pollingIngestStats ) { this.shardRouting = routing; this.dataPath = shardPath.getRootDataPath().toString(); @@ -105,6 +114,7 @@ public ShardStats( this.commonStats = commonStats; this.seqNoStats = seqNoStats; this.retentionLeaseStats = retentionLeaseStats; + this.pollingIngestStats = pollingIngestStats; } /** @@ -128,6 +138,11 @@ public SeqNoStats getSeqNoStats() { return this.seqNoStats; } + @Nullable + public PollingIngestStats getPollingIngestStats() { + return this.pollingIngestStats; + } + public String getDataPath() { return dataPath; } @@ -150,6 +165,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isCustomDataPath); out.writeOptionalWriteable(seqNoStats); out.writeOptionalWriteable(retentionLeaseStats); + if (out.getVersion().onOrAfter((Version.V_3_0_0))) { + out.writeOptionalWriteable(pollingIngestStats); + } } @Override @@ -171,6 +189,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (retentionLeaseStats != null) { retentionLeaseStats.toXContent(builder, params); } + if (pollingIngestStats != null) { + pollingIngestStats.toXContent(builder, params); + } builder.startObject(Fields.SHARD_PATH); builder.field(Fields.STATE_PATH, statePath); builder.field(Fields.DATA_PATH, dataPath); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 2b85b6d5d6b5b..baa1dfa2431e6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -52,6 
+52,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -141,16 +142,27 @@ protected ShardStats shardOperation(IndicesStatsRequest request, ShardRouting sh CommitStats commitStats; SeqNoStats seqNoStats; RetentionLeaseStats retentionLeaseStats; + PollingIngestStats pollingIngestStats; try { commitStats = indexShard.commitStats(); seqNoStats = indexShard.seqNoStats(); retentionLeaseStats = indexShard.getRetentionLeaseStats(); + pollingIngestStats = indexShard.pollingIngestStats(); } catch (final AlreadyClosedException e) { // shard is closed - no stats is fine commitStats = null; seqNoStats = null; retentionLeaseStats = null; + pollingIngestStats = null; } - return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), commonStats, commitStats, seqNoStats, retentionLeaseStats); + return new ShardStats( + indexShard.routingEntry(), + indexShard.shardPath(), + commonStats, + commitStats, + seqNoStats, + retentionLeaseStats, + pollingIngestStats + ); } } diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java index db08ea1164f68..92858ffc26902 100644 --- a/server/src/main/java/org/opensearch/index/engine/Engine.java +++ b/server/src/main/java/org/opensearch/index/engine/Engine.java @@ -93,6 +93,7 @@ import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogDeletionPolicy; import org.opensearch.index.translog.TranslogManager; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.search.suggest.completion.CompletionStats; import java.io.Closeable; @@ -946,6 +947,13 @@ public SegmentsStats segmentsStats(boolean includeSegmentFileSizes, boolean incl return stats; } + /** + * @return Stats for pull-based ingestion. 
+ * Engines that do not support pull-based ingestion return {@code null}. + */ + public PollingIngestStats pollingIngestStats() { + return null; + } + protected TranslogDeletionPolicy getTranslogDeletionPolicy(EngineConfig engineConfig) { TranslogDeletionPolicy customTranslogDeletionPolicy = null; if (engineConfig.getCustomTranslogDeletionPolicyFactory() != null) { diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index 72b59ba88b4c2..00feab082c178 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -29,6 +29,7 @@ import org.opensearch.index.translog.TranslogStats; import org.opensearch.index.translog.listener.CompositeTranslogEventListener; import org.opensearch.indices.pollingingest.DefaultStreamPoller; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.indices.pollingingest.StreamPoller; import java.io.IOException; @@ -288,4 +289,9 @@ protected TranslogManager createTranslogManager( protected Map commitDataAsMap() { return commitDataAsMap(indexWriter); } + + @Override + public PollingIngestStats pollingIngestStats() { + return streamPoller.getStats(); + } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java index f8ad3fc8cf866..bd47a664b729d 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -184,6 +184,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.cluster.IndicesClusterStateService; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; import org.opensearch.indices.recovery.RecoveryListener; @@ -1533,6 +1534,10 @@ public CompletionStats completionStats(String... fields) { return getEngine().completionStats(fields); } + public PollingIngestStats pollingIngestStats() { + return getEngine().pollingIngestStats(); + } + /** * Executes the given flush request against the engine.
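Since the stats plumbed through Engine and IndexShard above are exposed per shard via the indices stats API, a caller can read them roughly the way the Kafka integration test in this patch does. A minimal sketch, assuming an available client handle and an index named "my-index" (both illustrative, not from the patch):

    import org.opensearch.action.admin.indices.stats.IndicesStatsResponse;
    import org.opensearch.action.admin.indices.stats.ShardStats;
    import org.opensearch.indices.pollingingest.PollingIngestStats;

    IndicesStatsResponse resp = client.admin().indices().prepareStats("my-index").get();
    for (ShardStats shard : resp.getIndex("my-index").getShards()) {
        PollingIngestStats stats = shard.getPollingIngestStats();
        if (stats == null) {
            // shard is not fed by pull-based ingestion (engine returns null)
            continue;
        }
        long polled = stats.getConsumerStats().getTotalPolledCount();
        long processed = stats.getMessageProcessorStats().getTotalProcessedCount();
        // polled can transiently exceed processed while messages sit in the
        // queue between the poller and the message processor
    }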
* diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 527c2c23ba6b1..f3b0121dd5c88 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -149,6 +149,7 @@ import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.pollingingest.IngestionEngineFactory; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoverySettings; @@ -758,15 +759,18 @@ IndexShardStats indexShardStats(final IndicesService indicesService, final Index CommitStats commitStats; SeqNoStats seqNoStats; RetentionLeaseStats retentionLeaseStats; + PollingIngestStats pollingIngestStats; try { commitStats = indexShard.commitStats(); seqNoStats = indexShard.seqNoStats(); retentionLeaseStats = indexShard.getRetentionLeaseStats(); + pollingIngestStats = indexShard.pollingIngestStats(); } catch (AlreadyClosedException e) { // shard is closed - no stats is fine commitStats = null; seqNoStats = null; retentionLeaseStats = null; + pollingIngestStats = null; } return new IndexShardStats( @@ -778,7 +782,8 @@ IndexShardStats indexShardStats(final IndicesService indicesService, final Index new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), commitStats, seqNoStats, - retentionLeaseStats + retentionLeaseStats, + pollingIngestStats ) } ); } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java index 884cffec4aad5..3dfd77f75c82d 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.Nullable; +import org.opensearch.common.metrics.CounterMetric; import org.opensearch.index.IngestionShardConsumer; import org.opensearch.index.IngestionShardPointer; import org.opensearch.index.Message; @@ -60,6 +61,8 @@ public class DefaultStreamPoller implements StreamPoller { private MessageProcessorRunnable processorRunnable; + private final CounterMetric totalPolledCount = new CounterMetric(); + // A pointer to the max persisted pointer for optimizing the check @Nullable private IngestionShardPointer maxPersistedPointer; @@ -204,6 +207,7 @@ protected void startPoll() { logger.info("Skipping message with pointer {} as it is already processed", result.getPointer().asString()); continue; } + totalPolledCount.inc(); blockingQueue.put(result); logger.debug( "Put message {} with pointer {} to the blocking queue", @@ -297,6 +301,14 @@ public IngestionShardPointer getBatchStartPointer() { return batchStartPointer; } + @Override + public PollingIngestStats getStats() { + PollingIngestStats.Builder builder = new PollingIngestStats.Builder(); + builder.setTotalPolledCount(totalPolledCount.count()); + builder.setTotalProcessedCount(processorRunnable.getStats().count()); + return builder.build(); + } + public State getState() { return state; } diff --git 
a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java index 53f9353477869..0c06ebc558466 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java @@ -14,6 +14,7 @@ import org.apache.lucene.index.Term; import org.opensearch.action.DocWriteRequest; import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesArray; @@ -48,6 +49,7 @@ public class MessageProcessorRunnable implements Runnable { private final BlockingQueue> blockingQueue; private final MessageProcessor messageProcessor; + private final CounterMetric stats = new CounterMetric(); private static final String ID = "_id"; private static final String OP_TYPE = "_op_type"; @@ -229,8 +231,13 @@ public void run() { Thread.currentThread().interrupt(); // Restore interrupt status } if (result != null) { + stats.inc(); messageProcessor.process(result.getMessage(), result.getPointer()); } } } + + public CounterMetric getStats() { + return stats; + } } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/PollingIngestStats.java b/server/src/main/java/org/opensearch/indices/pollingingest/PollingIngestStats.java new file mode 100644 index 0000000000000..cda706b29083a --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/pollingingest/PollingIngestStats.java @@ -0,0 +1,175 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.pollingingest; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Stats for pull-based ingestion + */ +@ExperimentalApi +public class PollingIngestStats implements Writeable, ToXContentFragment { + private final MessageProcessorStats messageProcessorStats; + private final ConsumerStats consumerStats; + // TODO: add error stats from error handling sink + + public PollingIngestStats(MessageProcessorStats messageProcessorStats, ConsumerStats consumerStats) { + this.messageProcessorStats = messageProcessorStats; + this.consumerStats = consumerStats; + } + + public PollingIngestStats(StreamInput in) throws IOException { + long totalProcessedCount = in.readLong(); + this.messageProcessorStats = new MessageProcessorStats(totalProcessedCount); + long totalPolledCount = in.readLong(); + this.consumerStats = new ConsumerStats(totalPolledCount); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(messageProcessorStats.getTotalProcessedCount()); + out.writeLong(consumerStats.getTotalPolledCount()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("polling_ingest_stats"); + builder.startObject("message_processor_stats"); + builder.field("total_processed_count", messageProcessorStats.getTotalProcessedCount()); + builder.endObject(); + builder.startObject("consumer_stats"); + builder.field("total_polled_count", consumerStats.getTotalPolledCount()); + builder.endObject(); + builder.endObject(); + return builder; + } + + public MessageProcessorStats getMessageProcessorStats() { + return messageProcessorStats; + } + + public ConsumerStats getConsumerStats() { + return consumerStats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof PollingIngestStats)) return false; + PollingIngestStats that = (PollingIngestStats) o; + return Objects.equals(messageProcessorStats, that.messageProcessorStats) && Objects.equals(consumerStats, that.consumerStats); + } + + @Override + public int hashCode() { + return Objects.hash(messageProcessorStats, consumerStats); + } + + /** + * Stats for message processor + */ + @ExperimentalApi + public static class MessageProcessorStats { + private final long totalProcessedCount; + + public MessageProcessorStats(long totalProcessedCount) { + this.totalProcessedCount = totalProcessedCount; + } + + public long getTotalProcessedCount() { + return totalProcessedCount; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MessageProcessorStats)) return false; + MessageProcessorStats that = (MessageProcessorStats) o; + return totalProcessedCount == that.totalProcessedCount; + } + + @Override + public int hashCode() { + return Objects.hash(totalProcessedCount); + } + } + + /** + * Stats for consumer (poller) + */ + @ExperimentalApi + public static class ConsumerStats { + private final long totalPolledCount; + + public ConsumerStats(long totalPolledCount) { + this.totalPolledCount = totalPolledCount; + } + + public long getTotalPolledCount() { + return totalPolledCount; 
+ } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof ConsumerStats)) return false; + ConsumerStats that = (ConsumerStats) o; + return totalPolledCount == that.totalPolledCount; + } + + @Override + public int hashCode() { + return Objects.hash(totalPolledCount); + } + } + + /** + * Builder for {@link PollingIngestStats} + */ + @ExperimentalApi + public static class Builder { + private long totalProcessedCount; + private long totalPolledCount; + + public Builder() {} + + public Builder setTotalProcessedCount(long totalProcessedCount) { + this.totalProcessedCount = totalProcessedCount; + return this; + } + + public Builder setTotalPolledCount(long totalPolledCount) { + this.totalPolledCount = totalPolledCount; + return this; + } + + public PollingIngestStats build() { + MessageProcessorStats messageProcessorStats = new MessageProcessorStats(totalProcessedCount); + ConsumerStats consumerStats = new ConsumerStats(totalPolledCount); + return new PollingIngestStats(messageProcessorStats, consumerStats); + } + } + + /** + * Returns a new builder for creating a {@link PollingIngestStats} instance. + * + * @return a new {@code Builder} instance + */ + public static Builder builder() { + return new Builder(); + } +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java index 5010982991ecc..15e1745433df2 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java @@ -50,6 +50,8 @@ public interface StreamPoller extends Closeable { */ IngestionShardPointer getBatchStartPointer(); + PollingIngestStats getStats(); + /** * a state to indicate the current state of the poller */ diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index 34065daff2b8a..cccca0448a2cc 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -1413,6 +1413,7 @@ private HashMap> createRandomShardByStats(List shardStatsList = new ArrayList<>(); @@ -1464,6 +1465,7 @@ public MockNodeIndicesStats generateMockNodeIndicesStats( commonStats, null, null, + null, null ); IndexShardStats indexShardStats = new IndexShardStats(shardRouting.shardId(), new ShardStats[] { shardStats }); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java index 823661ba14abf..58d789b704a38 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java @@ -402,6 +402,7 @@ private ShardStats[] createshardStats(DiscoveryNode localNode, Index index, Comm commonStats, null, null, + null, null ); shardStatsList.add(shardStats); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponseTests.java index ad7706292d93c..193c9cc471f7b 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponseTests.java @@ -270,6 +270,7 @@ private ShardStats[] createShardStats(DiscoveryNode localNode, Index index, Comm commonStats, null, null, + null, null ); shardStatsList.add(shardStats); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java index 724c919f65375..6cef1049b3b50 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -422,7 +422,7 @@ public static IndicesStatsResponse randomIndicesStatsResponse(final IndexMetadat stats.get = new GetStats(); stats.flush = new FlushStats(); stats.warmer = new WarmerStats(); - shardStats.add(new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null, null, null)); + shardStats.add(new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null, null, null, null)); } } return IndicesStatsTests.newIndicesStatsResponse( diff --git a/server/src/test/java/org/opensearch/action/admin/indices/shards/CatShardsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/shards/CatShardsResponseTests.java index 11b1d5567d9fb..00d4a311dca1d 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/shards/CatShardsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/shards/CatShardsResponseTests.java @@ -152,7 +152,7 @@ private IndicesStatsResponse getIndicesStatsResponse() { Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve(String.valueOf(shardId)); ShardPath shardPath = new ShardPath(false, path, path, shId); ShardRouting routing = createShardRouting(shId, (shardId == 0)); - shards.add(new ShardStats(routing, shardPath, new CommonStats(), null, null, null)); + shards.add(new ShardStats(routing, shardPath, new CommonStats(), null, null, null, null)); } } return new IndicesStatsResponse(shards.toArray(new ShardStats[0]), 0, 0, 0, null); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java index 2b79e523fc620..421646f0812fe 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java @@ -88,7 +88,7 @@ public void testGetIndices() { Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve(String.valueOf(shardId)); ShardPath shardPath = new ShardPath(false, path, path, shId); ShardRouting routing = createShardRouting(index, shId, (shardId == 0)); - shards.add(new ShardStats(routing, shardPath, null, null, null, null)); + shards.add(new ShardStats(routing, shardPath, null, null, null, null, null)); AtomicLong primaryShardsCounter = expectedIndexToPrimaryShardsCount.computeIfAbsent( index.getName(), k -> new AtomicLong(0L) diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java index cd050fb346563..d790d95757b02 100644 --- 
a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java @@ -135,8 +135,8 @@ public void testFillShardLevelInfo() { CommonStats commonStats1 = new CommonStats(); commonStats1.store = new StoreStats(1000, 0L); ShardStats[] stats = new ShardStats[] { - new ShardStats(test_0, new ShardPath(false, test0Path, test0Path, test_0.shardId()), commonStats0, null, null, null), - new ShardStats(test_1, new ShardPath(false, test1Path, test1Path, test_1.shardId()), commonStats1, null, null, null) }; + new ShardStats(test_0, new ShardPath(false, test0Path, test0Path, test_0.shardId()), commonStats0, null, null, null, null), + new ShardStats(test_1, new ShardPath(false, test1Path, test1Path, test_1.shardId()), commonStats1, null, null, null, null) }; final Map shardSizes = new HashMap<>(); final Map routingToPath = new HashMap<>(); InternalClusterInfoService.buildShardLevelInfo(logger, stats, shardSizes, routingToPath, new HashMap<>()); diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 7614a54da52bf..9fc779891b810 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -1696,7 +1696,8 @@ public void testShardStats() throws IOException { new CommonStats(new IndicesQueryCache(Settings.EMPTY), shard, new CommonStatsFlags()), shard.commitStats(), shard.seqNoStats(), - shard.getRetentionLeaseStats() + shard.getRetentionLeaseStats(), + shard.pollingIngestStats() ); assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath()); assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath()); @@ -1838,7 +1839,8 @@ public void testShardStatsWithRemoteStoreEnabled() throws IOException { new CommonStats(new IndicesQueryCache(Settings.EMPTY), shard, new CommonStatsFlags()), shard.commitStats(), shard.seqNoStats(), - shard.getRetentionLeaseStats() + shard.getRetentionLeaseStats(), + shard.pollingIngestStats() ); RemoteSegmentStats remoteSegmentStats = shardStats.getStats().getSegments().getRemoteSegmentStats(); assertRemoteSegmentStats(remoteSegmentTransferTracker, remoteSegmentStats); diff --git a/server/src/test/java/org/opensearch/indices/pollingingest/PollingIngestStatsTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/PollingIngestStatsTests.java new file mode 100644 index 0000000000000..d64f350239013 --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/pollingingest/PollingIngestStatsTests.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.pollingingest; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class PollingIngestStatsTests extends OpenSearchTestCase { + + public void testToXContent() throws IOException { + PollingIngestStats stats = createTestInstance(); + + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + builder.startObject(); + stats.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + String expected = "{\"polling_ingest_stats\":{\"message_processor_stats\":{\"total_processed_count\":" + + stats.getMessageProcessorStats().getTotalProcessedCount() + + "},\"consumer_stats\":{\"total_polled_count\":" + + stats.getConsumerStats().getTotalPolledCount() + + "}}}"; + + assertEquals(expected, builder.toString()); + } + + public void testSerialization() throws IOException { + PollingIngestStats original = createTestInstance(); + + try (BytesStreamOutput output = new BytesStreamOutput()) { + original.writeTo(output); + + try (StreamInput input = output.bytes().streamInput()) { + PollingIngestStats deserialized = new PollingIngestStats(input); + assertEquals(original, deserialized); + } + } + } + + private PollingIngestStats createTestInstance() { + return PollingIngestStats.builder() + .setTotalProcessedCount(randomNonNegativeLong()) + .setTotalPolledCount(randomNonNegativeLong()) + .build(); + } +} diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java index c412167a10c75..53a5cec1332fb 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java @@ -93,6 +93,7 @@ public void setup() { commonStats, null, null, + null, null ); shardStatsMap.put(shardRouting, shardStats); From 218f353e821f5f641443bc1ffa1dc8ea73818709 Mon Sep 17 00:00:00 2001 From: Sandesh Kumar Date: Tue, 4 Mar 2025 08:28:13 +0530 Subject: [PATCH 37/48] [Star Tree] [Search] Keyword & Numeric Terms Aggregation (#17165) --------- Signed-off-by: Sandesh Kumar --- CHANGELOG-3.0.md | 1 + .../bucket/BucketsAggregator.java | 6 + .../histogram/DateHistogramAggregator.java | 50 +-- .../GlobalOrdinalsStringTermsAggregator.java | 81 ++++- .../bucket/terms/NumericTermsAggregator.java | 82 ++++- .../bucket/terms/TermsAggregator.java | 7 +- .../search/startree/StarTreeQueryContext.java | 33 ++ .../search/startree/StarTreeQueryHelper.java | 33 ++ .../search/SearchServiceStarTreeTests.java | 150 ++++++++ .../startree/KeywordTermsAggregatorTests.java | 245 +++++++++++++ .../startree/NumericTermsAggregatorTests.java | 342 ++++++++++++++++++ .../aggregations/AggregatorTestCase.java | 3 + 12 files changed, 986 insertions(+), 47 deletions(-) create mode 100644 server/src/test/java/org/opensearch/search/aggregations/startree/KeywordTermsAggregatorTests.java create mode 100644 server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 1a0f9280136c4..7211368c65ffb 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -22,6 +22,7 @@ The format is 
based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) - Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) - Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder([#17409](https://github.com/opensearch-project/OpenSearch/pull/17409)) +- [Star Tree] [Search] Resolving keyword & numeric bucket aggregation with metric aggregation using star-tree ([#17165](https://github.com/opensearch-project/OpenSearch/pull/17165)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java index f075d67b0f48d..a65728b2d658a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java @@ -136,6 +136,12 @@ public final void collectExistingBucket(LeafBucketCollector subCollector, int do */ public final void collectStarTreeBucket(StarTreeBucketCollector collector, long docCount, long bucketOrd, int entryBit) throws IOException { + if (bucketOrd < 0) { + bucketOrd = -1 - bucketOrd; + } else { + grow(bucketOrd + 1); + } + if (docCounts.increment(bucketOrd, docCount) == docCount) { multiBucketConsumer.accept(0); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 2294ba6f9a2b5..d825b33a0f150 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -33,18 +33,14 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; -import org.apache.lucene.util.FixedBitSet; import org.opensearch.common.Nullable; import org.opensearch.common.Rounding; import org.opensearch.common.lease.Releasables; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.compositeindex.datacube.DateDimension; -import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; -import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitRounding; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; @@ -192,9 +188,9 @@ public ScoreMode scoreMode() { protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { - if 
(preComputeWithStarTree(ctx, supportedStarTree) == true) { - return true; - } + StarTreeBucketCollector starTreeBucketCollector = getStarTreeBucketCollector(ctx, supportedStarTree, null); + StarTreeQueryHelper.preComputeBucketsWithStarTree(starTreeBucketCollector); + return true; } return filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, segmentMatchAll(context, ctx)); } @@ -268,6 +264,10 @@ public StarTreeBucketCollector getStarTreeBucketCollector( ) throws IOException { assert parentCollector == null; StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(ctx, starTree); + SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getDimensionValuesIterator(starTreeDateDimension); + SortedNumericStarTreeValuesIterator docCountsIterator = StarTreeQueryHelper.getDocCountsIterator(starTreeValues, starTree); + return new StarTreeBucketCollector( starTreeValues, StarTreeTraversalUtil.getStarTreeResult( @@ -287,17 +287,6 @@ public void setSubCollectors() throws IOException { } } - SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues - .getDimensionValuesIterator(starTreeDateDimension); - - String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( - starTree.getField(), - "_doc_count", - MetricStat.DOC_COUNT.getTypeName() - ); - SortedNumericStarTreeValuesIterator docCountsIterator = (SortedNumericStarTreeValuesIterator) starTreeValues - .getMetricValuesIterator(metricName); - @Override public void collectStarTreeEntry(int starTreeEntry, long owningBucketOrd) throws IOException { if (!valuesIterator.advanceExact(starTreeEntry)) { @@ -311,15 +300,8 @@ public void collectStarTreeEntry(int starTreeEntry, long owningBucketOrd) throws if (docCountsIterator.advanceExact(starTreeEntry)) { long metricValue = docCountsIterator.nextValue(); - long bucketOrd = bucketOrds.add(owningBucketOrd, dimensionValue); - if (bucketOrd < 0) { - bucketOrd = -1 - bucketOrd; - collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); - } else { - grow(bucketOrd + 1); - collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); - } + collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); } } } @@ -393,20 +375,4 @@ public double bucketSize(long bucket, Rounding.DateTimeUnit unitSize) { return 1.0; } } - - private boolean preComputeWithStarTree(LeafReaderContext ctx, CompositeIndexFieldInfo starTree) throws IOException { - StarTreeBucketCollector starTreeBucketCollector = getStarTreeBucketCollector(ctx, starTree, null); - FixedBitSet matchingDocsBitSet = starTreeBucketCollector.getMatchingDocsBitSet(); - - int numBits = matchingDocsBitSet.length(); - - if (numBits > 0) { - for (int bit = matchingDocsBitSet.nextSetBit(0); bit != DocIdSetIterator.NO_MORE_DOCS; bit = (bit + 1 < numBits) - ? 
matchingDocsBitSet.nextSetBit(bit + 1) - : DocIdSetIterator.NO_MORE_DOCS) { - starTreeBucketCollector.collectStarTreeEntry(bit, 0); - } - } - return true; - } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index ef925b7f6416a..d8ec9feaf44b4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -51,6 +51,10 @@ import org.opensearch.common.util.LongHash; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedSetStarTreeValuesIterator; import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.AggregationExecutionException; @@ -63,14 +67,20 @@ import org.opensearch.search.aggregations.InternalOrder; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import org.opensearch.search.aggregations.StarTreeBucketCollector; +import org.opensearch.search.aggregations.StarTreePreComputeCollector; import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds; import org.opensearch.search.aggregations.bucket.terms.SignificanceLookup.BackgroundFrequencyForBytes; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeQueryHelper; +import org.opensearch.search.startree.StarTreeTraversalUtil; +import org.opensearch.search.startree.filter.DimensionFilter; import java.io.IOException; import java.util.Arrays; +import java.util.List; import java.util.Map; import java.util.function.BiConsumer; import java.util.function.Function; @@ -85,18 +95,19 @@ * * @opensearch.internal */ -public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggregator { +public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggregator implements StarTreePreComputeCollector { protected final ResultStrategy resultStrategy; protected final ValuesSource.Bytes.WithOrdinals valuesSource; private final LongPredicate acceptedGlobalOrdinals; private final long valueCount; - private final String fieldName; + protected final String fieldName; private Weight weight; protected final CollectionStrategy collectionStrategy; private final SetOnce dvs = new SetOnce<>(); protected int segmentsWithSingleValuedOrds = 0; protected int segmentsWithMultiValuedOrds = 0; + LongUnaryOperator globalOperator; /** * Lookup global ordinals @@ -219,6 +230,9 @@ boolean tryCollectFromTermFrequencies(LeafReaderContext ctx, SortedSetDocValues @Override protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { SortedSetDocValues globalOrds = 
valuesSource.globalOrdinalsValues(ctx); + if (tryStarTreePrecompute(ctx) == true) { + return true; + } if (collectionStrategy instanceof DenseGlobalOrds && this.resultStrategy instanceof StandardTermsResults && subAggregators.length == 0) { @@ -231,6 +245,17 @@ protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws return false; } + protected boolean tryStarTreePrecompute(LeafReaderContext ctx) throws IOException { + CompositeIndexFieldInfo supportedStarTree = StarTreeQueryHelper.getSupportedStarTree(this.context.getQueryShardContext()); + if (supportedStarTree != null) { + globalOperator = valuesSource.globalOrdinalsMapping(ctx); + StarTreeBucketCollector starTreeBucketCollector = getStarTreeBucketCollector(ctx, supportedStarTree, null); + StarTreeQueryHelper.preComputeBucketsWithStarTree(starTreeBucketCollector); + return true; + } + return false; + } + @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { SortedSetDocValues globalOrds = valuesSource.globalOrdinalsValues(ctx); @@ -307,6 +332,56 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }); } + public StarTreeBucketCollector getStarTreeBucketCollector( + LeafReaderContext ctx, + CompositeIndexFieldInfo starTree, + StarTreeBucketCollector parent + ) throws IOException { + assert parent == null; + StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(ctx, starTree); + SortedSetStarTreeValuesIterator valuesIterator = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator( + fieldName + ); + SortedNumericStarTreeValuesIterator docCountsIterator = StarTreeQueryHelper.getDocCountsIterator(starTreeValues, starTree); + + return new StarTreeBucketCollector( + starTreeValues, + StarTreeTraversalUtil.getStarTreeResult( + starTreeValues, + StarTreeQueryHelper.mergeDimensionFilterIfNotExists( + context.getQueryShardContext().getStarTreeQueryContext().getBaseQueryStarTreeFilter(), + fieldName, + List.of(DimensionFilter.MATCH_ALL_DEFAULT) + ), + context + ) + ) { + @Override + public void setSubCollectors() throws IOException { + for (Aggregator aggregator : subAggregators) { + this.subCollectors.add(((StarTreePreComputeCollector) aggregator).getStarTreeBucketCollector(ctx, starTree, this)); + } + } + + @Override + public void collectStarTreeEntry(int starTreeEntry, long owningBucketOrd) throws IOException { + if (valuesIterator.advanceExact(starTreeEntry) == false) { + return; + } + for (int i = 0, count = valuesIterator.docValueCount(); i < count; i++) { + long dimensionValue = valuesIterator.value(); + long ord = globalOperator.applyAsLong(dimensionValue); + + if (docCountsIterator.advanceExact(starTreeEntry)) { + long metricValue = docCountsIterator.nextValue(); + long bucketOrd = collectionStrategy.globalOrdToBucketOrd(0, ord); + collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); + } + } + } + }; + } + @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { return resultStrategy.buildAggregations(owningBucketOrds); @@ -444,7 +519,7 @@ protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws (ord, docCount) -> incrementBucketDocCount(mapping.applyAsLong(ord), docCount) ); } - return false; + return tryStarTreePrecompute(ctx); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java index 1d78a59a563f0..bcdea9fb4af3c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java @@ -41,7 +41,11 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.LongArray; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; import org.opensearch.index.fielddata.FieldData; +import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -52,6 +56,8 @@ import org.opensearch.search.aggregations.InternalOrder; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import org.opensearch.search.aggregations.StarTreeBucketCollector; +import org.opensearch.search.aggregations.StarTreePreComputeCollector; import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude.LongFilter; import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds.BucketOrdsEnum; @@ -60,6 +66,9 @@ import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeQueryHelper; +import org.opensearch.search.startree.StarTreeTraversalUtil; +import org.opensearch.search.startree.filter.DimensionFilter; import java.io.IOException; import java.math.BigInteger; @@ -79,11 +88,12 @@ * * @opensearch.internal */ -public class NumericTermsAggregator extends TermsAggregator { +public class NumericTermsAggregator extends TermsAggregator implements StarTreePreComputeCollector { private final ResultStrategy resultStrategy; private final ValuesSource.Numeric valuesSource; private final LongKeyedBucketOrds bucketOrds; private final LongFilter longFilter; + private final String fieldName; public NumericTermsAggregator( String name, @@ -105,6 +115,9 @@ public NumericTermsAggregator( this.valuesSource = valuesSource; this.longFilter = longFilter; bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), cardinality); + this.fieldName = (this.valuesSource instanceof ValuesSource.Numeric.FieldData) + ? 
((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName() + : null; } @Override @@ -146,6 +159,73 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }); } + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { + CompositeIndexFieldInfo supportedStarTree = StarTreeQueryHelper.getSupportedStarTree(this.context.getQueryShardContext()); + if (supportedStarTree != null) { + StarTreeBucketCollector starTreeBucketCollector = getStarTreeBucketCollector(ctx, supportedStarTree, null); + StarTreeQueryHelper.preComputeBucketsWithStarTree(starTreeBucketCollector); + return true; + } + return false; + } + + public StarTreeBucketCollector getStarTreeBucketCollector( + LeafReaderContext ctx, + CompositeIndexFieldInfo starTree, + StarTreeBucketCollector parent + ) throws IOException { + assert parent == null; + StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(ctx, starTree); + SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getDimensionValuesIterator(fieldName); + SortedNumericStarTreeValuesIterator docCountsIterator = StarTreeQueryHelper.getDocCountsIterator(starTreeValues, starTree); + + return new StarTreeBucketCollector( + starTreeValues, + StarTreeTraversalUtil.getStarTreeResult( + starTreeValues, + StarTreeQueryHelper.mergeDimensionFilterIfNotExists( + context.getQueryShardContext().getStarTreeQueryContext().getBaseQueryStarTreeFilter(), + fieldName, + List.of(DimensionFilter.MATCH_ALL_DEFAULT) + ), + context + ) + ) { + @Override + public void setSubCollectors() throws IOException { + for (Aggregator aggregator : subAggregators) { + this.subCollectors.add(((StarTreePreComputeCollector) aggregator).getStarTreeBucketCollector(ctx, starTree, this)); + } + } + + @Override + public void collectStarTreeEntry(int starTreeEntry, long owningBucketOrd) throws IOException { + if (valuesIterator.advanceExact(starTreeEntry) == false) { + return; + } + long dimensionValue = valuesIterator.nextValue(); + // Only numeric & floating points are supported as of now in star-tree + // TODO: Add support for isBigInteger() when it gets supported in star-tree + if (valuesSource.isFloatingPoint()) { + double doubleValue = ((NumberFieldMapper.NumberFieldType) context.mapperService().fieldType(fieldName)).toDoubleValue( + dimensionValue + ); + dimensionValue = NumericUtils.doubleToSortableLong(doubleValue); + } + + for (int i = 0, count = valuesIterator.entryValueCount(); i < count; i++) { + + if (docCountsIterator.advanceExact(starTreeEntry)) { + long metricValue = docCountsIterator.nextValue(); + long bucketOrd = bucketOrds.add(owningBucketOrd, dimensionValue); + collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); + } + } + } + }; + } + @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { return resultStrategy.buildAggregations(owningBucketOrds); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java index 918cc0276ed13..1ea78e08b91af 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java @@ -291,6 +291,11 @@ private boolean subAggsNeedScore() { @Override protected boolean shouldDefer(Aggregator aggregator) { - return 
collectMode == SubAggCollectionMode.BREADTH_FIRST && !aggsUsedForSorting.contains(aggregator); + if (context.getQueryShardContext().getStarTreeQueryContext() == null) { + return collectMode == SubAggCollectionMode.BREADTH_FIRST && !aggsUsedForSorting.contains(aggregator); + } else { + // when pre-computing using star-tree - return false (don't defer) for BREADTH_FIRST case + return collectMode != SubAggCollectionMode.BREADTH_FIRST; + } } } diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java index ca0ab9ce52f6e..a8f54f5793551 100644 --- a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java +++ b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java @@ -21,6 +21,7 @@ import org.opensearch.index.query.QueryBuilder; import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory; import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.startree.filter.StarTreeFilter; @@ -113,6 +114,13 @@ public boolean consolidateAllFilters(SearchContext context) { if (validateDateHistogramSupport(compositeMappedFieldType, aggregatorFactory)) { continue; } + + // validation for terms aggregation + if (validateKeywordTermsAggregationSupport(compositeMappedFieldType, aggregatorFactory)) { + continue; + } + + // invalid query shape return false; } @@ -151,6 +159,31 @@ private static boolean validateStarTreeMetricSupport( return false; } + private static boolean validateKeywordTermsAggregationSupport( + CompositeDataCubeFieldType compositeIndexFieldInfo, + AggregatorFactory aggregatorFactory + ) { + if (!(aggregatorFactory instanceof TermsAggregatorFactory termsAggregatorFactory)) { + return false; + } + + // Validate request field is part of dimensions + if (compositeIndexFieldInfo.getDimensions() + .stream() + .map(Dimension::getField) + .noneMatch(termsAggregatorFactory.getField()::equals)) { + return false; + } + + // Validate all sub-factories + for (AggregatorFactory subFactory : aggregatorFactory.getSubFactories().getFactories()) { + if (!validateStarTreeMetricSupport(compositeIndexFieldInfo, subFactory)) { + return false; + } + } + return true; + } + private StarTreeFilter getStarTreeFilter( SearchContext context, QueryBuilder queryBuilder, diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java index 0e3bc220461b9..68a613a373edf 100644 --- a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java +++ b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java @@ -16,9 +16,11 @@ import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import 
org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.StarTreeBucketCollector; import org.opensearch.search.aggregations.support.ValuesSource; @@ -177,6 +179,37 @@ public void collectStarTreeEntry(int starTreeEntryBit, long bucket) throws IOExc }; } + /** + * Fetches the metric values iterator for document counts from StarTreeValues. + */ + public static SortedNumericStarTreeValuesIterator getDocCountsIterator( + StarTreeValues starTreeValues, + CompositeIndexFieldInfo starTree + ) { + String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( + starTree.getField(), + DocCountFieldMapper.NAME, + MetricStat.DOC_COUNT.getTypeName() + ); + return (SortedNumericStarTreeValuesIterator) starTreeValues.getMetricValuesIterator(metricName); + } + + /** + * For a StarTreeBucketCollector, get matching star-tree entries and update relevant buckets in aggregator + */ + public static void preComputeBucketsWithStarTree(StarTreeBucketCollector starTreeBucketCollector) throws IOException { + FixedBitSet matchingDocsBitSet = starTreeBucketCollector.getMatchingDocsBitSet(); + int numBits = matchingDocsBitSet.length(); + + if (numBits > 0) { + for (int bit = matchingDocsBitSet.nextSetBit(0); bit != DocIdSetIterator.NO_MORE_DOCS; bit = (bit + 1 < numBits) + ? matchingDocsBitSet.nextSetBit(bit + 1) + : DocIdSetIterator.NO_MORE_DOCS) { + starTreeBucketCollector.collectStarTreeEntry(bit, 0); + } + } + } + public static StarTreeFilter mergeDimensionFilterIfNotExists( StarTreeFilter baseStarTreeFilter, String dimensionToMerge, diff --git a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java index 93b133c0302c9..95c877bfce0a8 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java @@ -47,10 +47,12 @@ import org.opensearch.search.aggregations.SearchContextAggregations; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; import org.opensearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder; import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; import org.opensearch.search.aggregations.startree.DateHistogramAggregatorTests; +import org.opensearch.search.aggregations.startree.NumericTermsAggregatorTests; import org.opensearch.search.aggregations.startree.StarTreeFilterTests; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.search.builder.SearchSourceBuilder; @@ -70,6 +72,7 @@ import static org.opensearch.search.aggregations.AggregationBuilders.max; import static org.opensearch.search.aggregations.AggregationBuilders.medianAbsoluteDeviation; import static org.opensearch.search.aggregations.AggregationBuilders.sum; +import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; import static org.mockito.Mockito.mock; @@ -539,6 +542,153 @@ public void testInvalidQueryParsingForDateHistogramAggregations() throws IOExcep 
setStarTreeIndexSetting(null); } + /** + * Test query parsing for bucket aggregations, with and without a numeric term query + */ + public void testQueryParsingForBucketAggregations() throws IOException { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + setStarTreeIndexSetting("true"); + + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) + .build(); + CreateIndexRequestBuilder builder = client().admin() + .indices() + .prepareCreate("test") + .setSettings(settings) + .setMapping(NumericTermsAggregatorTests.getExpandedMapping(1, false)); + createIndex("test", builder); + + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test")); + IndexShard indexShard = indexService.getShard(0); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(true), + indexShard.shardId(), + 1, + new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f, + -1, + null, + null + ); + String KEYWORD_FIELD = "clientip"; + String NUMERIC_FIELD = "size"; + + MaxAggregationBuilder maxAggNoSub = max("max").field(FIELD_NAME); + SumAggregationBuilder sumAggNoSub = sum("sum").field(FIELD_NAME); + SumAggregationBuilder sumAggSub = sum("sum").field(FIELD_NAME).subAggregation(maxAggNoSub); + MedianAbsoluteDeviationAggregationBuilder medianAgg = medianAbsoluteDeviation("median").field(FIELD_NAME); + + QueryBuilder baseQuery; + SearchContext searchContext = createSearchContext(indexService); + StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration( + 1, + Collections.emptySet(), + StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP + ); + + // Case 1: MatchAllQuery with a non-nested metric aggregation nested within a keyword terms aggregation, should use star tree + TermsAggregationBuilder termsAggregationBuilder = terms("term").field(KEYWORD_FIELD).subAggregation(maxAggNoSub); + baseQuery = new MatchAllQueryBuilder(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(termsAggregationBuilder); + + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + -1, + List.of(new NumericDimension(NUMERIC_FIELD), new OrdinalDimension(KEYWORD_FIELD)), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); + + // Case 2: MatchAllQuery with a non-nested metric aggregation nested within a numeric terms aggregation, should use star tree + termsAggregationBuilder = terms("term").field(NUMERIC_FIELD).subAggregation(maxAggNoSub); + sourceBuilder = new SearchSourceBuilder().size(0).query(new MatchAllQueryBuilder()).aggregation(termsAggregationBuilder); + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + -1, + List.of(new NumericDimension(NUMERIC_FIELD), new OrdinalDimension(KEYWORD_FIELD)), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); + + // Case 3: NumericTermsQuery with a non-nested metric 
aggregation nested within a keyword terms aggregation, should use star tree + termsAggregationBuilder = terms("term").field(KEYWORD_FIELD).subAggregation(maxAggNoSub); + baseQuery = new TermQueryBuilder(FIELD_NAME, 1); + sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(termsAggregationBuilder); + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + -1, + List.of(new NumericDimension(NUMERIC_FIELD), new OrdinalDimension(KEYWORD_FIELD), new NumericDimension(FIELD_NAME)), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); + + // Case 4: NumericTermsQuery with multiple non-nested metric aggregations within a numeric terms aggregation, should use star tree + termsAggregationBuilder = terms("term").field(NUMERIC_FIELD).subAggregation(maxAggNoSub).subAggregation(sumAggNoSub); + sourceBuilder = new SearchSourceBuilder().size(0).query(new TermQueryBuilder(FIELD_NAME, 1)).aggregation(termsAggregationBuilder); + + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + -1, + List.of(new NumericDimension(NUMERIC_FIELD), new OrdinalDimension(KEYWORD_FIELD), new NumericDimension(FIELD_NAME)), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); + + // Case 5: A nested metric aggregation within a numeric terms aggregation, should not use star tree + termsAggregationBuilder = terms("term").field(NUMERIC_FIELD).subAggregation(sumAggSub); + sourceBuilder = new SearchSourceBuilder().size(0).query(new TermQueryBuilder(FIELD_NAME, 1)).aggregation(termsAggregationBuilder); + assertStarTreeContext(request, sourceBuilder, null, -1); + + // Case 6: An unsupported aggregation within a numeric terms aggregation, should not use star tree + termsAggregationBuilder = terms("term").field(NUMERIC_FIELD).subAggregation(medianAgg); + sourceBuilder = new SearchSourceBuilder().size(0).query(new TermQueryBuilder(FIELD_NAME, 1)).aggregation(termsAggregationBuilder); + assertStarTreeContext(request, sourceBuilder, null, -1); + + setStarTreeIndexSetting(null); + } + private void setStarTreeIndexSetting(String value) { client().admin() .cluster() diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/KeywordTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/KeywordTermsAggregatorTests.java new file mode 100644 index 0000000000000..2ca9f6b592a0d --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/KeywordTermsAggregatorTests.java @@ -0,0 +1,245 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.startree; + +import com.carrotsearch.randomizedtesting.RandomizedTest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene101.Lucene101Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.composite101.Composite101Codec; +import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.OrdinalDimension; +import org.opensearch.index.mapper.KeywordFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.aggregations.Aggregator; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.bucket.terms.InternalTerms; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Random; + +import static org.opensearch.search.aggregations.AggregationBuilders.avg; +import static org.opensearch.search.aggregations.AggregationBuilders.count; +import static org.opensearch.search.aggregations.AggregationBuilders.max; +import static org.opensearch.search.aggregations.AggregationBuilders.min; +import static org.opensearch.search.aggregations.AggregationBuilders.sum; +import static org.opensearch.search.aggregations.AggregationBuilders.terms; +import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; + +public class KeywordTermsAggregatorTests extends AggregatorTestCase { + final static String STATUS = "status"; + final static String SIZE = "size"; + final static String CLIENTIP = "clientip"; + private static final MappedFieldType STATUS_FIELD_TYPE = new NumberFieldMapper.NumberFieldType( + STATUS, + NumberFieldMapper.NumberType.LONG + ); + private static final MappedFieldType SIZE_FIELD_NAME 
= new NumberFieldMapper.NumberFieldType(SIZE, NumberFieldMapper.NumberType.FLOAT); + private static final MappedFieldType CLIENTIP_FIELD_NAME = new KeywordFieldMapper.KeywordFieldType(CLIENTIP); + + @Before + public void setup() { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + } + + @After + public void teardown() throws IOException { + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + } + + protected Codec getCodec() { + final Logger testLogger = LogManager.getLogger(KeywordTermsAggregatorTests.class); + MapperService mapperService; + try { + mapperService = StarTreeDocValuesFormatTests.createMapperService(NumericTermsAggregatorTests.getExpandedMapping(1, false)); + } catch (IOException e) { + throw new RuntimeException(e); + } + return new Composite101Codec(Lucene101Codec.Mode.BEST_SPEED, mapperService, testLogger); + } + + public void testStarTreeKeywordTerms() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setCodec(getCodec()); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + + Random random = RandomizedTest.getRandom(); + int totalDocs = 100; + + long val; + + List docs = new ArrayList<>(); + // Index 100 random documents + for (int i = 0; i < totalDocs; i++) { + Document doc = new Document(); + if (random.nextBoolean()) { + val = random.nextInt(10); // Random int between 0 and 9 for status + doc.add(new SortedNumericDocValuesField(STATUS, val)); + } + if (random.nextBoolean()) { + val = NumericUtils.doubleToSortableLong(random.nextInt(100) + 0.5f); + doc.add(new SortedNumericDocValuesField(SIZE, val)); + } + if (random.nextBoolean()) { + val = random.nextInt(10); // Random strings for int between 0 and 9 for clientip + doc.add(new SortedSetDocValuesField(CLIENTIP, new BytesRef(String.valueOf(val)))); + doc.add(new StringField(CLIENTIP, String.valueOf(val), Field.Store.NO)); + } + iw.addDocument(doc); + docs.add(doc); + } + + if (randomBoolean()) { + iw.forceMerge(1); + } + iw.close(); + DirectoryReader ir = DirectoryReader.open(directory); + LeafReaderContext context = ir.leaves().get(0); + + SegmentReader reader = Lucene.segmentReader(context.reader()); + IndexSearcher indexSearcher = newSearcher(wrapInMockESDirectoryReader(ir), false, false); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + CompositeIndexFieldInfo starTree = compositeIndexFields.get(0); + + LinkedHashMap supportedDimensions = new LinkedHashMap<>(); + supportedDimensions.put(new NumericDimension(STATUS), STATUS_FIELD_TYPE); + supportedDimensions.put(new NumericDimension(SIZE), SIZE_FIELD_NAME); + supportedDimensions.put(new OrdinalDimension(CLIENTIP), CLIENTIP_FIELD_NAME); + + Query query = new MatchAllDocsQuery(); + QueryBuilder queryBuilder = null; + TermsAggregationBuilder termsAggregationBuilder = terms("terms_agg").field(CLIENTIP) + .collectMode(Aggregator.SubAggCollectionMode.BREADTH_FIRST); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + + ValuesSourceAggregationBuilder[] aggBuilders = { + sum("_sum").field(SIZE), + max("_max").field(SIZE), + min("_min").field(SIZE), + count("_count").field(SIZE), + avg("_avg").field(SIZE) }; + + for (ValuesSourceAggregationBuilder aggregationBuilder : aggBuilders) { + 
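+            // For each supported metric sub-aggregation, compare the star-tree pre-computed buckets against the
+            // default execution path (see testCase below, which runs the same aggregation with and without star-tree)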
query = new MatchAllDocsQuery(); + queryBuilder = null; + + termsAggregationBuilder = terms("terms_agg").field(CLIENTIP) + .subAggregation(aggregationBuilder) + .collectMode(Aggregator.SubAggCollectionMode.BREADTH_FIRST); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + + // Numeric-terms query with keyword terms aggregation + for (int cases = 0; cases < 100; cases++) { + // query of status field + String queryField = STATUS; + long queryValue = random.nextInt(10); + query = SortedNumericDocValuesField.newSlowExactQuery(queryField, queryValue); + queryBuilder = new TermQueryBuilder(queryField, queryValue); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + + // query on size field + queryField = SIZE; + queryValue = NumericUtils.floatToSortableInt(random.nextInt(20) - 14.5f); + query = SortedNumericDocValuesField.newSlowExactQuery(queryField, queryValue); + queryBuilder = new TermQueryBuilder(queryField, queryValue); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + } + } + ir.close(); + directory.close(); + } + + private void testCase( + IndexSearcher indexSearcher, + Query query, + QueryBuilder queryBuilder, + TermsAggregationBuilder termsAggregationBuilder, + CompositeIndexFieldInfo starTree, + LinkedHashMap supportedDimensions + ) throws IOException { + InternalTerms starTreeAggregation = searchAndReduceStarTree( + createIndexSettings(), + indexSearcher, + query, + queryBuilder, + termsAggregationBuilder, + starTree, + supportedDimensions, + null, + DEFAULT_MAX_BUCKETS, + false, + null, + true, + STATUS_FIELD_TYPE, + SIZE_FIELD_NAME, + CLIENTIP_FIELD_NAME + ); + + InternalTerms defaultAggregation = searchAndReduceStarTree( + createIndexSettings(), + indexSearcher, + query, + queryBuilder, + termsAggregationBuilder, + null, + null, + null, + DEFAULT_MAX_BUCKETS, + false, + null, + false, + STATUS_FIELD_TYPE, + SIZE_FIELD_NAME, + CLIENTIP_FIELD_NAME + ); + + assertEquals(defaultAggregation.getBuckets().size(), starTreeAggregation.getBuckets().size()); + assertEquals(defaultAggregation.getBuckets(), starTreeAggregation.getBuckets()); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java new file mode 100644 index 0000000000000..d3cb2d17e7c16 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java @@ -0,0 +1,342 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.startree; + +import com.carrotsearch.randomizedtesting.RandomizedTest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene101.Lucene101Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.composite101.Composite101Codec; +import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.bucket.terms.InternalTerms; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Random; + +import static org.opensearch.index.codec.composite912.datacube.startree.AbstractStarTreeDVFormatTests.topMapping; +import static org.opensearch.search.aggregations.AggregationBuilders.avg; +import static org.opensearch.search.aggregations.AggregationBuilders.count; +import static org.opensearch.search.aggregations.AggregationBuilders.max; +import static org.opensearch.search.aggregations.AggregationBuilders.min; +import static org.opensearch.search.aggregations.AggregationBuilders.sum; +import static org.opensearch.search.aggregations.AggregationBuilders.terms; +import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; + +public class NumericTermsAggregatorTests extends AggregatorTestCase { + final static String STATUS = "status"; + final static String SIZE = "size"; + private static final MappedFieldType STATUS_FIELD_TYPE = new NumberFieldMapper.NumberFieldType( + STATUS, + NumberFieldMapper.NumberType.LONG + ); + private static final MappedFieldType SIZE_FIELD_NAME = new NumberFieldMapper.NumberFieldType(SIZE, NumberFieldMapper.NumberType.FLOAT); + + @Before + public void setup() { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + } + + @After + 
public void teardown() throws IOException { + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + } + + protected Codec getCodec() { + final Logger testLogger = LogManager.getLogger(NumericTermsAggregatorTests.class); + MapperService mapperService; + try { + mapperService = StarTreeDocValuesFormatTests.createMapperService(getExpandedMapping(1, false)); + } catch (IOException e) { + throw new RuntimeException(e); + } + return new Composite101Codec(Lucene101Codec.Mode.BEST_SPEED, mapperService, testLogger); + } + + public void testStarTreeNumericTerms() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setCodec(getCodec()); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + + Random random = RandomizedTest.getRandom(); + int totalDocs = 100; + + long val; + + List docs = new ArrayList<>(); + // Index 100 random documents + for (int i = 0; i < totalDocs; i++) { + Document doc = new Document(); + if (random.nextBoolean()) { + val = random.nextInt(10); // Random int between (0 and 9) for status + doc.add(new SortedNumericDocValuesField(STATUS, val)); + } + if (random.nextBoolean()) { + val = NumericUtils.doubleToSortableLong(random.nextInt(100) + 0.5f); + // Random float between (0 and 99)+0.5f for size + doc.add(new SortedNumericDocValuesField(SIZE, val)); + } + iw.addDocument(doc); + docs.add(doc); + } + + if (randomBoolean()) { + iw.forceMerge(1); + } + iw.close(); + DirectoryReader ir = DirectoryReader.open(directory); + LeafReaderContext context = ir.leaves().get(0); + + SegmentReader reader = Lucene.segmentReader(context.reader()); + IndexSearcher indexSearcher = newSearcher(reader, false, false); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + CompositeIndexFieldInfo starTree = compositeIndexFields.get(0); + + LinkedHashMap supportedDimensions = new LinkedHashMap<>(); + supportedDimensions.put(new NumericDimension(STATUS), STATUS_FIELD_TYPE); + supportedDimensions.put(new NumericDimension(SIZE), SIZE_FIELD_NAME); + + Query query = new MatchAllDocsQuery(); + QueryBuilder queryBuilder = null; + TermsAggregationBuilder termsAggregationBuilder = terms("terms_agg").field(STATUS); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + + ValuesSourceAggregationBuilder[] aggBuilders = { + sum("_sum").field(SIZE), + max("_max").field(SIZE), + min("_min").field(SIZE), + count("_count").field(SIZE), + avg("_avg").field(SIZE) }; + + for (ValuesSourceAggregationBuilder aggregationBuilder : aggBuilders) { + query = new MatchAllDocsQuery(); + queryBuilder = null; + termsAggregationBuilder = terms("terms_agg").field(STATUS).subAggregation(aggregationBuilder); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + + // Numeric-terms query with numeric terms aggregation + for (int cases = 0; cases < 100; cases++) { + + // query of status field + String queryField = STATUS; + long queryValue = random.nextInt(10); + query = SortedNumericDocValuesField.newSlowExactQuery(queryField, queryValue); + queryBuilder = new TermQueryBuilder(queryField, queryValue); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + + // query on size field + queryField = SIZE; + queryValue = 
NumericUtils.floatToSortableInt(random.nextInt(20) - 14.5f); + query = SortedNumericDocValuesField.newSlowExactQuery(queryField, queryValue); + queryBuilder = new TermQueryBuilder(queryField, queryValue); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + } + } + + aggBuilders = new ValuesSourceAggregationBuilder[] { + sum("_sum").field(STATUS), + max("_max").field(STATUS), + min("_min").field(STATUS), + count("_count").field(STATUS), + avg("_avg").field(STATUS) }; + + for (ValuesSourceAggregationBuilder aggregationBuilder : aggBuilders) { + query = new MatchAllDocsQuery(); + queryBuilder = null; + + termsAggregationBuilder = terms("terms_agg").field(SIZE).subAggregation(aggregationBuilder); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + } + + ir.close(); + directory.close(); + } + + private void testCase( + IndexSearcher indexSearcher, + Query query, + QueryBuilder queryBuilder, + TermsAggregationBuilder termsAggregationBuilder, + CompositeIndexFieldInfo starTree, + LinkedHashMap supportedDimensions + ) throws IOException { + InternalTerms starTreeAggregation = searchAndReduceStarTree( + createIndexSettings(), + indexSearcher, + query, + queryBuilder, + termsAggregationBuilder, + starTree, + supportedDimensions, + null, + DEFAULT_MAX_BUCKETS, + false, + null, + true, + STATUS_FIELD_TYPE, + SIZE_FIELD_NAME + ); + + InternalTerms defaultAggregation = searchAndReduceStarTree( + createIndexSettings(), + indexSearcher, + query, + queryBuilder, + termsAggregationBuilder, + null, + null, + null, + DEFAULT_MAX_BUCKETS, + false, + null, + false, + STATUS_FIELD_TYPE, + SIZE_FIELD_NAME + ); + + assertEquals(defaultAggregation.getBuckets().size(), starTreeAggregation.getBuckets().size()); + assertEquals(defaultAggregation.getBuckets(), starTreeAggregation.getBuckets()); + } + + public static XContentBuilder getExpandedMapping(int maxLeafDocs, boolean skipStarNodeCreationForStatusDimension) throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree1"); // Use the same name as the provided mapping + b.field("type", "star_tree"); + b.startObject("config"); + b.field("max_leaf_docs", maxLeafDocs); + if (skipStarNodeCreationForStatusDimension) { + b.startArray("skip_star_node_creation_for_dimensions"); + b.value("status"); // Skip for "status" dimension + b.endArray(); + } + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "status"); + b.endObject(); + b.startObject(); + b.field("name", "size"); + b.endObject(); + b.startObject(); + b.field("name", "clientip"); + b.endObject(); + b.startObject(); + b.field("name", "@timestamp"); + b.startArray("calendar_intervals"); + b.value("month"); + b.value("day"); + b.endArray(); + b.endObject(); + b.endArray(); + b.startArray("metrics"); + b.startObject(); + b.field("name", "size"); + b.startArray("stats"); + b.value("sum"); + b.value("value_count"); + b.value("min"); + b.value("max"); + b.endArray(); + b.endObject(); + b.startObject(); + b.field("name", "status"); + b.startArray("stats"); + b.value("sum"); + b.value("value_count"); + b.value("min"); + b.value("max"); + b.endArray(); + b.endObject(); + b.endArray(); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("properties"); + b.startObject("@timestamp"); + b.field("type", "date"); + b.field("format", "strict_date_optional_time||epoch_second"); + b.endObject(); + b.startObject("message"); + b.field("type", 
"keyword"); + b.field("index", false); + b.field("doc_values", false); + b.endObject(); + b.startObject("clientip"); + b.field("type", "keyword"); + b.endObject(); + b.startObject("request"); + b.field("type", "text"); + b.startObject("fields"); + b.startObject("raw"); + b.field("type", "keyword"); + b.field("ignore_above", 256); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("status"); + b.field("type", "integer"); + b.endObject(); + b.startObject("size"); + b.field("type", "float"); + b.endObject(); + b.startObject("geoip"); + b.startObject("properties"); + b.startObject("country_name"); + b.field("type", "keyword"); + b.endObject(); + b.startObject("city_name"); + b.field("type", "keyword"); + b.endObject(); + b.startObject("location"); + b.field("type", "geo_point"); + b.endObject(); + b.endObject(); + b.endObject(); + b.endObject(); + }); + } +} diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index eba1769ad882d..df982d4f0c7f3 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -168,6 +168,7 @@ import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; @@ -441,6 +442,8 @@ protected SearchContext createSearchContextWithStarTreeContext( searchContext.getQueryShardContext().setStarTreeQueryContext(starTreeQueryContext); } + Stream.of(fieldTypes).forEach(fieldType -> when(mapperService.fieldType(fieldType.name())).thenReturn(fieldType)); + return searchContext; } From f6d6aa61e5039e4c6143cc25a71c3e448572dd33 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Tue, 4 Mar 2025 10:44:36 +0530 Subject: [PATCH 38/48] Fix flaky test RemoteIndexRecoveryIT.testRerouteRecovery (#17228) Signed-off-by: Sachin Kale --- .../indices/recovery/IndexRecoveryIT.java | 4 ++-- .../remotestore/RemoteIndexRecoveryIT.java | 24 +++++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index d30806b4325ac..9d893cb6f33c7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -250,7 +250,7 @@ private void assertOnGoingRecoveryState( assertThat(state.getStage(), not(equalTo(Stage.DONE))); } - private void slowDownRecovery(ByteSizeValue shardSize) { + public void slowDownRecovery(ByteSizeValue shardSize) { long chunkSize = Math.max(1, shardSize.getBytes() / 10); assertTrue( client().admin() @@ -528,7 +528,7 @@ public void testRerouteRecovery() throws Exception { assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsSource(), equalTo(1)); indicesService = internalCluster().getInstance(IndicesService.class, nodeB); assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsTarget(), equalTo(1)); - }, TimeValue.timeValueSeconds(10), TimeValue.timeValueMillis(500)); + }, TimeValue.timeValueSeconds(60), TimeValue.timeValueMillis(500)); 
logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java index 6de61cf203c60..1961b0fa43705 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java @@ -10,9 +10,12 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.indices.recovery.IndexRecoveryIT; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; import org.hamcrest.Matcher; @@ -22,6 +25,7 @@ import java.nio.file.Path; +import static org.opensearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE_SETTING; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -54,6 +58,26 @@ public Settings indexSettings() { .build(); } + @Override + public void slowDownRecovery(ByteSizeValue shardSize) { + logger.info("--> shardSize: " + shardSize); + long chunkSize = Math.max(1, shardSize.getBytes() / 50); + assertTrue( + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + // throttle recovery to roughly one chunk per second by capping bandwidth at one chunk size 
+ .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSize, ByteSizeUnit.BYTES) + // small chunks + .put(INDICES_RECOVERY_CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES)) + ) + .get() + .isAcknowledged() + ); + } + @After public void teardown() { clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); From 2e4cc8c6e12f0e5fdfe9274da0126e81f95f59b3 Mon Sep 17 00:00:00 2001 From: Fen Qin <75345540+fen-qin@users.noreply.github.com> Date: Tue, 4 Mar 2025 14:37:02 -0800 Subject: [PATCH 39/48] Fix explain action on query rewrite (#17286) (#17286) Signed-off-by: Fen Qin --- CHANGELOG-3.0.md | 1 + .../opensearch/explain/ExplainActionIT.java | 24 +++++++++++++++++++ .../explain/TransportExplainAction.java | 17 ++++++++++++- 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 7211368c65ffb..62d55d40f4bb7 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -77,6 +77,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix swapped field formats in nodes API where `total_indexing_buffer_in_bytes` and `total_indexing_buffer` values were reversed ([#17070](https://github.com/opensearch-project/OpenSearch/pull/17070)) - Add HTTP/2 protocol support to HttpRequest.HttpVersion ([#17248](https://github.com/opensearch-project/OpenSearch/pull/17248)) - Fix missing bucket in terms aggregation with missing value ([#17418](https://github.com/opensearch-project/OpenSearch/pull/17418)) +- Fix explain action on query rewrite ([#17286](https://github.com/opensearch-project/OpenSearch/pull/17286)) ### Security diff --git a/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java b/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java index 2949fa34a0795..723ff803851d3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java @@ -40,6 +40,8 @@ import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.indices.TermsLookup; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.ByteArrayInputStream; @@ -52,6 +54,7 @@ import java.util.Set; import static java.util.Collections.singleton; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.queryStringQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -305,4 +308,25 @@ public void testStreamExplain() throws Exception { result = Lucene.readExplanation(esBuffer); assertThat(exp.toString(), equalTo(result.toString())); } + + public void testQueryRewrite() { + client().admin() + .indices() + .prepareCreate("twitter") + .setMapping("user", "type=integer", "followers", "type=integer") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2)) + .get(); + client().prepareIndex("twitter").setId("1").setSource("followers", new int[] { 1, 2, 3 }).get(); + refresh(); + + TermsQueryBuilder termsLookupQuery = QueryBuilders.termsLookupQuery("user", new TermsLookup("twitter", "1", "followers")); + ExplainResponse 
response = client().prepareExplain("twitter", "1").setQuery(termsLookupQuery).get(); + assertNotNull(response); + assertTrue(response.isExists()); + assertFalse(response.isMatch()); + assertThat(response.getIndex(), equalTo("twitter")); + assertThat(response.getId(), equalTo("1")); + assertNotNull(response.getExplanation()); + assertFalse(response.getExplanation().isMatch()); + } } diff --git a/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java index fb2ccc6ebbf12..710fb46ce7328 100644 --- a/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java @@ -52,6 +52,8 @@ import org.opensearch.index.get.GetResult; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.Uid; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.Rewriteable; import org.opensearch.index.shard.IndexShard; import org.opensearch.search.SearchService; import org.opensearch.search.internal.AliasFilter; @@ -101,7 +103,20 @@ public TransportExplainAction( @Override protected void doExecute(Task task, ExplainRequest request, ActionListener listener) { request.nowInMillis = System.currentTimeMillis(); - super.doExecute(task, request, listener); + // if there's no query we can't rewrite it + if (request.query() == null) { + super.doExecute(task, request, listener); + return; + } + ActionListener rewriteListener = ActionListener.wrap(rewrittenQuery -> { + request.query(rewrittenQuery); + super.doExecute(task, request, listener); + }, listener::onFailure); + Rewriteable.rewriteAndFetch( + request.query(), + searchService.getIndicesService().getRewriteContext(() -> request.nowInMillis), + rewriteListener + ); } @Override From 17363d5bb46584e7dcd4dea218c05dd37a563dbe Mon Sep 17 00:00:00 2001 From: Varun Bharadwaj Date: Tue, 4 Mar 2025 15:04:48 -0800 Subject: [PATCH 40/48] [Pull-based Ingestion] Add error handling strategy to pull-based ingestion (#17427) * Add error handling strategy to pull-based ingestion Signed-off-by: Varun Bharadwaj * Make error strategy config type-safe Signed-off-by: Varun Bharadwaj --------- Signed-off-by: Varun Bharadwaj --- CHANGELOG-3.0.md | 1 + .../plugin/kafka/KafkaPartitionConsumer.java | 5 + .../cluster/metadata/IndexMetadata.java | 14 +- .../cluster/metadata/IngestionSource.java | 31 +++- .../common/settings/IndexScopedSettings.java | 1 + .../index/IngestionShardConsumer.java | 5 + .../index/engine/IngestionEngine.java | 20 ++- .../BlockIngestionErrorStrategy.java | 36 +++++ .../pollingingest/DefaultStreamPoller.java | 30 +++- .../DropIngestionErrorStrategy.java | 37 +++++ .../pollingingest/IngestionErrorStrategy.java | 68 +++++++++ .../MessageProcessorRunnable.java | 21 ++- .../metadata/IngestionSourceTests.java | 24 +-- .../index/engine/FakeIngestionSource.java | 5 + .../DefaultStreamPollerTests.java | 137 +++++++++++++++++- 15 files changed, 398 insertions(+), 37 deletions(-) create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 62d55d40f4bb7..7e82efd268007 100644 --- a/CHANGELOG-3.0.md +++ 
b/CHANGELOG-3.0.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) - Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder([#17409](https://github.com/opensearch-project/OpenSearch/pull/17409)) - [Star Tree] [Search] Resolving keyword & numeric bucket aggregation with metric aggregation using star-tree ([#17165](https://github.com/opensearch-project/OpenSearch/pull/17165)) +- Added error handling support for the pull-based ingestion ([#17427](https://github.com/opensearch-project/OpenSearch/pull/17427)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java index 9461cfbc2de98..c749a887a2ccb 100644 --- a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java +++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java @@ -132,6 +132,11 @@ public KafkaOffset nextPointer() { return new KafkaOffset(lastFetchedOffset + 1); } + @Override + public KafkaOffset nextPointer(KafkaOffset pointer) { + return new KafkaOffset(pointer.getOffset() + 1); + } + @Override public IngestionShardPointer earliestPointer() { long startOffset = AccessController.doPrivileged( diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index cabea0efe8433..e9bd3b74404b1 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -71,6 +71,7 @@ import org.opensearch.index.IndexModule; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.indices.pollingingest.IngestionErrorStrategy; import org.opensearch.indices.pollingingest.StreamPoller; import org.opensearch.indices.replication.SegmentReplicationSource; import org.opensearch.indices.replication.common.ReplicationType; @@ -770,6 +771,15 @@ public Iterator> settings() { Property.Final ); + public static final String SETTING_INGESTION_SOURCE_ERROR_STRATEGY = "index.ingestion_source.error_strategy"; + public static final Setting INGESTION_SOURCE_ERROR_STRATEGY_SETTING = new Setting<>( + SETTING_INGESTION_SOURCE_ERROR_STRATEGY, + IngestionErrorStrategy.ErrorStrategy.DROP.name(), + IngestionErrorStrategy.ErrorStrategy::parseFromString, + (errorStrategy) -> {}, + Property.IndexScope + ); + public static final Setting.AffixSetting INGESTION_SOURCE_PARAMS_SETTING = Setting.prefixKeySetting( "index.ingestion_source.param.", key -> new Setting<>(key, "", (value) -> { @@ -1004,8 +1014,10 @@ public IngestionSource getIngestionSource() { pointerInitResetType, pointerInitResetValue ); + + final IngestionErrorStrategy.ErrorStrategy errorStrategy = INGESTION_SOURCE_ERROR_STRATEGY_SETTING.get(settings); final Map ingestionSourceParams = INGESTION_SOURCE_PARAMS_SETTING.getAsMap(settings); - return new IngestionSource(ingestionSourceType, pointerInitReset, ingestionSourceParams); + return new IngestionSource(ingestionSourceType, pointerInitReset, errorStrategy, 
ingestionSourceParams); } return null; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java index 9849c0a5f2ba9..fd28acf3246ad 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java @@ -9,6 +9,7 @@ package org.opensearch.cluster.metadata; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.indices.pollingingest.IngestionErrorStrategy; import org.opensearch.indices.pollingingest.StreamPoller; import java.util.Map; @@ -21,12 +22,19 @@ public class IngestionSource { private String type; private PointerInitReset pointerInitReset; + private IngestionErrorStrategy.ErrorStrategy errorStrategy; private Map params; - public IngestionSource(String type, PointerInitReset pointerInitReset, Map params) { + public IngestionSource( + String type, + PointerInitReset pointerInitReset, + IngestionErrorStrategy.ErrorStrategy errorStrategy, + Map params + ) { this.type = type; this.pointerInitReset = pointerInitReset; this.params = params; + this.errorStrategy = errorStrategy; } public String getType() { @@ -37,6 +45,10 @@ public PointerInitReset getPointerInitReset() { return pointerInitReset; } + public IngestionErrorStrategy.ErrorStrategy getErrorStrategy() { + return errorStrategy; + } + public Map params() { return params; } @@ -48,17 +60,30 @@ public boolean equals(Object o) { IngestionSource ingestionSource = (IngestionSource) o; return Objects.equals(type, ingestionSource.type) && Objects.equals(pointerInitReset, ingestionSource.pointerInitReset) + && Objects.equals(errorStrategy, ingestionSource.errorStrategy) && Objects.equals(params, ingestionSource.params); } @Override public int hashCode() { - return Objects.hash(type, pointerInitReset, params); + return Objects.hash(type, pointerInitReset, params, errorStrategy); } @Override public String toString() { - return "IngestionSource{" + "type='" + type + '\'' + ",pointer_init_reset='" + pointerInitReset + '\'' + ", params=" + params + '}'; + return "IngestionSource{" + + "type='" + + type + + '\'' + + ",pointer_init_reset='" + + pointerInitReset + + '\'' + + ",error_strategy='" + + errorStrategy + + '\'' + + ", params=" + + params + + '}'; } /** diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index dc77ffd720bad..12bee5cd14f57 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -266,6 +266,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetadata.INGESTION_SOURCE_POINTER_INIT_RESET_SETTING, IndexMetadata.INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING, IndexMetadata.INGESTION_SOURCE_PARAMS_SETTING, + IndexMetadata.INGESTION_SOURCE_ERROR_STRATEGY_SETTING, // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { diff --git a/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java b/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java index 41e659196a612..a9ffcaca850f2 100644 --- a/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java +++ b/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java @@ -72,6 +72,11 @@ public 
M getMessage() { */ T nextPointer(); + /** + * @return the immediate next pointer from the provided start pointer + */ + T nextPointer(T startPointer); + /** * @return the earliest pointer in the shard */ diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index 00feab082c178..b919e15b56211 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -29,6 +29,7 @@ import org.opensearch.index.translog.TranslogStats; import org.opensearch.index.translog.listener.CompositeTranslogEventListener; import org.opensearch.indices.pollingingest.DefaultStreamPoller; +import org.opensearch.indices.pollingingest.IngestionErrorStrategy; import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.indices.pollingingest.StreamPoller; @@ -99,12 +100,21 @@ public void start() { } String resetValue = ingestionSource.getPointerInitReset().getValue(); - streamPoller = new DefaultStreamPoller(startPointer, persistedPointers, ingestionShardConsumer, this, resetState, resetValue); + IngestionErrorStrategy ingestionErrorStrategy = IngestionErrorStrategy.create( + ingestionSource.getErrorStrategy(), + ingestionSource.getType() + ); - // Poller is only started on the primary shard. Replica shards will rely on segment replication. - if (!engineConfig.isReadOnlyReplica()) { - streamPoller.start(); - } + streamPoller = new DefaultStreamPoller( + startPointer, + persistedPointers, + ingestionShardConsumer, + this, + resetState, + resetValue, + ingestionErrorStrategy + ); + streamPoller.start(); } protected Set fetchPersistedOffsets(DirectoryReader directoryReader, IngestionShardPointer batchStart) diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java b/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java new file mode 100644 index 0000000000000..d0febd0909be2 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.pollingingest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +/** + * This error handling strategy blocks on failures preventing processing of remaining updates in the ingestion source. 
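+ * Writes pause until ingestion is resumed, so, unlike {@link DropIngestionErrorStrategy}, no records are
+ * silently skipped.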
+ */ +public class BlockIngestionErrorStrategy implements IngestionErrorStrategy { + private static final Logger logger = LogManager.getLogger(BlockIngestionErrorStrategy.class); + private final String ingestionSource; + + public BlockIngestionErrorStrategy(String ingestionSource) { + this.ingestionSource = ingestionSource; + } + + @Override + public void handleError(Throwable e, ErrorStage stage) { + logger.error("Error processing update from {}: {}", ingestionSource, e); + + // todo: record blocking update and emit metrics + } + + @Override + public boolean shouldPauseIngestion(Throwable e, ErrorStage stage) { + return true; + } +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java index 3dfd77f75c82d..6fe010504f1a8 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java @@ -67,21 +67,25 @@ public class DefaultStreamPoller implements StreamPoller { @Nullable private IngestionShardPointer maxPersistedPointer; + private IngestionErrorStrategy errorStrategy; + public DefaultStreamPoller( IngestionShardPointer startPointer, Set persistedPointers, IngestionShardConsumer consumer, IngestionEngine ingestionEngine, ResetState resetState, - String resetValue + String resetValue, + IngestionErrorStrategy errorStrategy ) { this( startPointer, persistedPointers, consumer, - new MessageProcessorRunnable(new ArrayBlockingQueue<>(100), ingestionEngine), + new MessageProcessorRunnable(new ArrayBlockingQueue<>(100), ingestionEngine, errorStrategy), resetState, - resetValue + resetValue, + errorStrategy ); } @@ -91,7 +95,8 @@ public DefaultStreamPoller( IngestionShardConsumer consumer, MessageProcessorRunnable processorRunnable, ResetState resetState, - String resetValue + String resetValue, + IngestionErrorStrategy errorStrategy ) { this.consumer = Objects.requireNonNull(consumer); this.resetState = resetState; @@ -117,6 +122,7 @@ public DefaultStreamPoller( String.format(Locale.ROOT, "stream-poller-processor-%d-%d", consumer.getShardId(), System.currentTimeMillis()) ) ); + this.errorStrategy = errorStrategy; } @Override @@ -141,6 +147,9 @@ protected void startPoll() { } logger.info("Starting poller for shard {}", consumer.getShardId()); + // track the last record successfully written to the blocking queue + IngestionShardPointer lastSuccessfulPointer = null; + while (true) { try { if (closed) { @@ -209,6 +218,7 @@ protected void startPoll() { } totalPolledCount.inc(); blockingQueue.put(result); + lastSuccessfulPointer = result.getPointer(); logger.debug( "Put message {} with pointer {} to the blocking queue", String.valueOf(result.getMessage().getPayload()), @@ -218,8 +228,18 @@ protected void startPoll() { // update the batch start pointer to the next batch batchStartPointer = consumer.nextPointer(); } catch (Throwable e) { - // TODO better error handling logger.error("Error in polling the shard {}: {}", consumer.getShardId(), e); + errorStrategy.handleError(e, IngestionErrorStrategy.ErrorStage.POLLING); + + if (errorStrategy.shouldPauseIngestion(e, IngestionErrorStrategy.ErrorStage.POLLING)) { + // Blocking error encountered. Pause poller to stop processing remaining updates. + pause(); + } else { + // Advance the batch start pointer to ignore the error and continue from next record + batchStartPointer = lastSuccessfulPointer == null + ? 
consumer.nextPointer(batchStartPointer) + : consumer.nextPointer(lastSuccessfulPointer); + } } } } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java b/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java new file mode 100644 index 0000000000000..4598bf1248cfd --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.pollingingest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +/** + * This error handling strategy drops failures and proceeds with remaining updates in the ingestion source. + */ +public class DropIngestionErrorStrategy implements IngestionErrorStrategy { + private static final Logger logger = LogManager.getLogger(DropIngestionErrorStrategy.class); + private final String ingestionSource; + + public DropIngestionErrorStrategy(String ingestionSource) { + this.ingestionSource = ingestionSource; + } + + @Override + public void handleError(Throwable e, ErrorStage stage) { + // pass the Throwable as the last argument (no placeholder) so log4j records the stack trace + logger.error("Error processing update from {}", ingestionSource, e); + + // todo: record failed update stats and emit metrics + } + + @Override + public boolean shouldPauseIngestion(Throwable e, ErrorStage stage) { + return false; + } + +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java new file mode 100644 index 0000000000000..a6e992a460cc1 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.pollingingest; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Locale; + +/** + * Defines the error handling strategy when an error is encountered either during polling records from ingestion source + * or during processing the polled records. + */ +@ExperimentalApi +public interface IngestionErrorStrategy { + + /** + * Process and record the error. + */ + void handleError(Throwable e, ErrorStage stage); + + /** + * Indicates if ingestion must be paused, blocking further writes.
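+ * Returning {@code true} (as {@link BlockIngestionErrorStrategy} does) makes the poller pause, while
+ * returning {@code false} (as {@link DropIngestionErrorStrategy} does) skips the failed record and
+ * continues polling from the next available pointer.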
+ */ + boolean shouldPauseIngestion(Throwable e, ErrorStage stage); + + static IngestionErrorStrategy create(ErrorStrategy errorStrategy, String ingestionSource) { + switch (errorStrategy) { + case BLOCK: + return new BlockIngestionErrorStrategy(ingestionSource); + case DROP: + default: + return new DropIngestionErrorStrategy(ingestionSource); + } + } + + /** + * Indicates available error handling strategies + */ + @ExperimentalApi + enum ErrorStrategy { + DROP, + BLOCK; + + public static ErrorStrategy parseFromString(String errorStrategy) { + try { + return ErrorStrategy.valueOf(errorStrategy.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Invalid ingestion errorStrategy: " + errorStrategy, e); + } + } + } + + /** + * Indicates different stages of encountered errors + */ + @ExperimentalApi + enum ErrorStage { + POLLING, + PROCESSING + } + +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java index 0c06ebc558466..0ac791e60de5a 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java @@ -50,6 +50,7 @@ public class MessageProcessorRunnable implements Runnable { private final BlockingQueue> blockingQueue; private final MessageProcessor messageProcessor; private final CounterMetric stats = new CounterMetric(); + private IngestionErrorStrategy errorStrategy; private static final String ID = "_id"; private static final String OP_TYPE = "_op_type"; @@ -63,9 +64,10 @@ public class MessageProcessorRunnable implements Runnable { */ public MessageProcessorRunnable( BlockingQueue> blockingQueue, - IngestionEngine engine + IngestionEngine engine, + IngestionErrorStrategy errorStrategy ) { - this(blockingQueue, new MessageProcessor(engine)); + this(blockingQueue, new MessageProcessor(engine), errorStrategy); } /** @@ -75,10 +77,12 @@ public MessageProcessorRunnable( */ MessageProcessorRunnable( BlockingQueue> blockingQueue, - MessageProcessor messageProcessor + MessageProcessor messageProcessor, + IngestionErrorStrategy errorStrategy ) { this.blockingQueue = Objects.requireNonNull(blockingQueue); this.messageProcessor = messageProcessor; + this.errorStrategy = errorStrategy; } static class MessageProcessor { @@ -231,8 +235,15 @@ public void run() { Thread.currentThread().interrupt(); // Restore interrupt status } if (result != null) { - stats.inc(); - messageProcessor.process(result.getMessage(), result.getPointer()); + try { + stats.inc(); + messageProcessor.process(result.getMessage(), result.getPointer()); + } catch (Exception e) { + errorStrategy.handleError(e, IngestionErrorStrategy.ErrorStage.PROCESSING); + if (errorStrategy.shouldPauseIngestion(e, IngestionErrorStrategy.ErrorStage.PROCESSING)) { + Thread.currentThread().interrupt(); + } + } } } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java index 0afe67002517b..05037f33c3965 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java @@ -14,6 +14,8 @@ import java.util.HashMap; import java.util.Map; +import static 
org.opensearch.indices.pollingingest.IngestionErrorStrategy.ErrorStrategy.DROP; + public class IngestionSourceTests extends OpenSearchTestCase { private final IngestionSource.PointerInitReset pointerInitReset = new IngestionSource.PointerInitReset( @@ -24,52 +26,50 @@ public class IngestionSourceTests extends OpenSearchTestCase { public void testConstructorAndGetters() { Map params = new HashMap<>(); params.put("key", "value"); - IngestionSource source = new IngestionSource("type", pointerInitReset, params); + IngestionSource source = new IngestionSource("type", pointerInitReset, DROP, params); assertEquals("type", source.getType()); assertEquals(StreamPoller.ResetState.REWIND_BY_OFFSET, source.getPointerInitReset().getType()); assertEquals("1000", source.getPointerInitReset().getValue()); + assertEquals(DROP, source.getErrorStrategy()); assertEquals(params, source.params()); } public void testEquals() { Map params1 = new HashMap<>(); params1.put("key", "value"); - IngestionSource source1 = new IngestionSource("type", pointerInitReset, params1); + IngestionSource source1 = new IngestionSource("type", pointerInitReset, DROP, params1); Map params2 = new HashMap<>(); params2.put("key", "value"); - IngestionSource source2 = new IngestionSource("type", pointerInitReset, params2); - + IngestionSource source2 = new IngestionSource("type", pointerInitReset, DROP, params2); assertTrue(source1.equals(source2)); assertTrue(source2.equals(source1)); - IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, params1); + IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, DROP, params1); assertFalse(source1.equals(source3)); } public void testHashCode() { Map params1 = new HashMap<>(); params1.put("key", "value"); - IngestionSource source1 = new IngestionSource("type", pointerInitReset, params1); + IngestionSource source1 = new IngestionSource("type", pointerInitReset, DROP, params1); Map params2 = new HashMap<>(); params2.put("key", "value"); - IngestionSource source2 = new IngestionSource("type", pointerInitReset, params2); - + IngestionSource source2 = new IngestionSource("type", pointerInitReset, DROP, params2); assertEquals(source1.hashCode(), source2.hashCode()); - IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, params1); + IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, DROP, params1); assertNotEquals(source1.hashCode(), source3.hashCode()); } public void testToString() { Map params = new HashMap<>(); params.put("key", "value"); - IngestionSource source = new IngestionSource("type", pointerInitReset, params); - + IngestionSource source = new IngestionSource("type", pointerInitReset, DROP, params); String expected = - "IngestionSource{type='type',pointer_init_reset='PointerInitReset{type='REWIND_BY_OFFSET', value=1000}', params={key=value}}"; + "IngestionSource{type='type',pointer_init_reset='PointerInitReset{type='REWIND_BY_OFFSET', value=1000}',error_strategy='DROP', params={key=value}}"; assertEquals(expected, source.toString()); } } diff --git a/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java b/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java index 1d81a22e94e9c..6233a65664d0b 100644 --- a/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java +++ b/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java @@ -83,6 +83,11 @@ public FakeIngestionShardPointer nextPointer() { return new 
FakeIngestionShardPointer(lastFetchedOffset + 1); } + @Override + public FakeIngestionShardPointer nextPointer(FakeIngestionShardPointer startPointer) { + return new FakeIngestionShardPointer(startPointer.offset + 1); + } + @Override public FakeIngestionShardPointer earliestPointer() { return new FakeIngestionShardPointer(0); diff --git a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java index c17b11791af09..0f0f90f392242 100644 --- a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java +++ b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java @@ -8,6 +8,7 @@ package org.opensearch.indices.pollingingest; +import org.opensearch.index.IngestionShardConsumer; import org.opensearch.index.IngestionShardPointer; import org.opensearch.index.engine.FakeIngestionSource; import org.opensearch.test.OpenSearchTestCase; @@ -16,19 +17,27 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class DefaultStreamPollerTests extends OpenSearchTestCase { private DefaultStreamPoller poller; @@ -38,6 +47,8 @@ public class DefaultStreamPollerTests extends OpenSearchTestCase { private List messages; private Set persistedPointers; private final int awaitTime = 300; + private final int sleepTime = 300; + private DropIngestionErrorStrategy errorStrategy; @Before public void setUp() throws Exception { @@ -48,7 +59,8 @@ public void setUp() throws Exception { messages.add("{\"_id\":\"2\",\"_source\":{\"name\":\"alice\", \"age\": 21}}".getBytes(StandardCharsets.UTF_8)); fakeConsumer = new FakeIngestionSource.FakeIngestionConsumer(messages, 0); processor = mock(MessageProcessorRunnable.MessageProcessor.class); - processorRunnable = new MessageProcessorRunnable(new ArrayBlockingQueue<>(5), processor); + errorStrategy = new DropIngestionErrorStrategy("ingestion_source"); + processorRunnable = new MessageProcessorRunnable(new ArrayBlockingQueue<>(5), processor, errorStrategy); persistedPointers = new HashSet<>(); poller = new DefaultStreamPoller( new FakeIngestionSource.FakeIngestionShardPointer(0), @@ -56,7 +68,8 @@ public void setUp() throws Exception { fakeConsumer, processorRunnable, StreamPoller.ResetState.NONE, - "" + "", + errorStrategy ); } @@ -111,7 +124,8 @@ public void testSkipProcessed() throws InterruptedException { fakeConsumer, processorRunnable, StreamPoller.ResetState.NONE, - "" + "", + errorStrategy ); CountDownLatch latch = new CountDownLatch(2); @@ -147,7 +161,8 @@ public void testResetStateEarliest() throws InterruptedException { fakeConsumer, processorRunnable, 
StreamPoller.ResetState.EARLIEST, - "" + "", + errorStrategy ); CountDownLatch latch = new CountDownLatch(2); doAnswer(invocation -> { @@ -169,7 +184,8 @@ public void testResetStateLatest() throws InterruptedException { fakeConsumer, processorRunnable, StreamPoller.ResetState.LATEST, - "" + "", + errorStrategy ); poller.start(); @@ -187,7 +203,8 @@ public void testResetStateRewindByOffset() throws InterruptedException { fakeConsumer, processorRunnable, StreamPoller.ResetState.REWIND_BY_OFFSET, - "1" + "1", + errorStrategy ); CountDownLatch latch = new CountDownLatch(1); doAnswer(invocation -> { @@ -221,4 +238,112 @@ public void testStartClosedPoller() throws InterruptedException { assertEquals("poller is closed!", e.getMessage()); } } + + public void testDropErrorIngestionStrategy() throws TimeoutException, InterruptedException { + messages.add("{\"_id\":\"3\",\"_source\":{\"name\":\"bob\", \"age\": 24}}".getBytes(StandardCharsets.UTF_8)); + messages.add("{\"_id\":\"4\",\"_source\":{\"name\":\"alice\", \"age\": 21}}".getBytes(StandardCharsets.UTF_8)); + List< + IngestionShardConsumer.ReadResult< + FakeIngestionSource.FakeIngestionShardPointer, + FakeIngestionSource.FakeIngestionMessage>> readResultsBatch1 = fakeConsumer.readNext( + fakeConsumer.earliestPointer(), + 2, + 100 + ); + List< + IngestionShardConsumer.ReadResult< + FakeIngestionSource.FakeIngestionShardPointer, + FakeIngestionSource.FakeIngestionMessage>> readResultsBatch2 = fakeConsumer.readNext(fakeConsumer.nextPointer(), 2, 100); + IngestionShardConsumer mockConsumer = mock(IngestionShardConsumer.class); + when(mockConsumer.getShardId()).thenReturn(0); + when(mockConsumer.readNext(any(), anyLong(), anyInt())).thenThrow(new RuntimeException("message1 poll failed")) + .thenReturn(readResultsBatch1) + .thenThrow(new RuntimeException("message3 poll failed")) + .thenReturn(readResultsBatch2) + .thenReturn(Collections.emptyList()); + + IngestionErrorStrategy errorStrategy = spy(new DropIngestionErrorStrategy("ingestion_source")); + poller = new DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(0), + persistedPointers, + mockConsumer, + processorRunnable, + StreamPoller.ResetState.NONE, + "", + errorStrategy + ); + poller.start(); + Thread.sleep(sleepTime); + + verify(errorStrategy, times(2)).handleError(any(), eq(IngestionErrorStrategy.ErrorStage.POLLING)); + verify(processor, times(4)).process(any(), any()); + } + + public void testBlockErrorIngestionStrategy() throws TimeoutException, InterruptedException { + messages.add("{\"_id\":\"3\",\"_source\":{\"name\":\"bob\", \"age\": 24}}".getBytes(StandardCharsets.UTF_8)); + messages.add("{\"_id\":\"4\",\"_source\":{\"name\":\"alice\", \"age\": 21}}".getBytes(StandardCharsets.UTF_8)); + List< + IngestionShardConsumer.ReadResult< + FakeIngestionSource.FakeIngestionShardPointer, + FakeIngestionSource.FakeIngestionMessage>> readResultsBatch1 = fakeConsumer.readNext( + fakeConsumer.earliestPointer(), + 2, + 100 + ); + List< + IngestionShardConsumer.ReadResult< + FakeIngestionSource.FakeIngestionShardPointer, + FakeIngestionSource.FakeIngestionMessage>> readResultsBatch2 = fakeConsumer.readNext(fakeConsumer.nextPointer(), 2, 100); + IngestionShardConsumer mockConsumer = mock(IngestionShardConsumer.class); + when(mockConsumer.getShardId()).thenReturn(0); + when(mockConsumer.readNext(any(), anyLong(), anyInt())).thenThrow(new RuntimeException("message1 poll failed")) + .thenReturn(readResultsBatch1) + .thenReturn(readResultsBatch2) + .thenReturn(Collections.emptyList()); 
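+ // The first readNext call fails; with the BLOCK strategy the poller is expected to pause before any
+ // message reaches the processor (verified below via the poller state and the processor mock).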
+ + IngestionErrorStrategy errorStrategy = spy(new BlockIngestionErrorStrategy("ingestion_source")); + poller = new DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(0), + persistedPointers, + mockConsumer, + processorRunnable, + StreamPoller.ResetState.NONE, + "", + errorStrategy + ); + poller.start(); + Thread.sleep(sleepTime); + + verify(errorStrategy, times(1)).handleError(any(), eq(IngestionErrorStrategy.ErrorStage.POLLING)); + verify(processor, never()).process(any(), any()); + assertEquals(DefaultStreamPoller.State.PAUSED, poller.getState()); + assertTrue(poller.isPaused()); + } + + public void testProcessingErrorWithBlockErrorIngestionStrategy() throws TimeoutException, InterruptedException { + messages.add("{\"_id\":\"3\",\"_source\":{\"name\":\"bob\", \"age\": 24}}".getBytes(StandardCharsets.UTF_8)); + messages.add("{\"_id\":\"4\",\"_source\":{\"name\":\"alice\", \"age\": 21}}".getBytes(StandardCharsets.UTF_8)); + + doThrow(new RuntimeException("Error processing update")).when(processor).process(any(), any()); + BlockIngestionErrorStrategy mockErrorStrategy = spy(new BlockIngestionErrorStrategy("ingestion_source")); + processorRunnable = new MessageProcessorRunnable(new ArrayBlockingQueue<>(5), processor, mockErrorStrategy); + + poller = new DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(0), + persistedPointers, + fakeConsumer, + processorRunnable, + StreamPoller.ResetState.NONE, + "", + mockErrorStrategy + ); + poller.start(); + Thread.sleep(sleepTime); + + verify(mockErrorStrategy, times(1)).handleError(any(), eq(IngestionErrorStrategy.ErrorStage.PROCESSING)); + verify(processor, times(1)).process(any(), any()); + // poller will continue to poll if an error is encountered during message processing but will be blocked by + // the write to blockingQueue + assertEquals(DefaultStreamPoller.State.POLLING, poller.getState()); + } } From 09af5184cc097d36d10ea2b418b17d4d252cb5de Mon Sep 17 00:00:00 2001 From: Peter Zhu Date: Tue, 4 Mar 2025 19:19:37 -0500 Subject: [PATCH 41/48] Bump software.amazon.awssdk from 2.20.86 to 2.30.31 (#17396) Signed-off-by: Peter Zhu Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + gradle/libs.versions.toml | 3 +- plugins/crypto-kms/build.gradle | 3 + .../licenses/annotations-2.20.86.jar.sha1 | 1 - .../licenses/annotations-2.30.31.jar.sha1 | 1 + .../licenses/apache-client-2.20.86.jar.sha1 | 1 - .../licenses/apache-client-2.30.31.jar.sha1 | 1 + .../crypto-kms/licenses/auth-2.20.86.jar.sha1 | 1 - .../crypto-kms/licenses/auth-2.30.31.jar.sha1 | 1 + .../licenses/aws-core-2.20.86.jar.sha1 | 1 - .../licenses/aws-core-2.30.31.jar.sha1 | 1 + .../aws-json-protocol-2.20.86.jar.sha1 | 1 - .../aws-json-protocol-2.30.31.jar.sha1 | 1 + .../aws-query-protocol-2.20.86.jar.sha1 | 1 - .../aws-query-protocol-2.30.31.jar.sha1 | 1 + .../licenses/endpoints-spi-2.20.86.jar.sha1 | 1 - .../licenses/endpoints-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-spi-NOTICE.txt | 14 ++ .../licenses/http-client-spi-2.20.86.jar.sha1 | 1 - .../licenses/http-client-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/identity-spi-NOTICE.txt | 14 ++ .../licenses/json-utils-2.20.86.jar.sha1 | 1 - .../licenses/json-utils-2.30.31.jar.sha1 | 1 + .../crypto-kms/licenses/kms-2.20.86.jar.sha1 | 1 - 
.../crypto-kms/licenses/kms-2.30.31.jar.sha1 | 1 + .../licenses/metrics-spi-2.20.86.jar.sha1 | 1 - .../licenses/metrics-spi-2.30.31.jar.sha1 | 1 + .../licenses/profiles-2.20.86.jar.sha1 | 1 - .../licenses/profiles-2.30.31.jar.sha1 | 1 + .../licenses/protocol-core-2.20.86.jar.sha1 | 1 - .../licenses/protocol-core-2.30.31.jar.sha1 | 1 + .../licenses/regions-2.20.86.jar.sha1 | 1 - .../licenses/regions-2.30.31.jar.sha1 | 1 + .../licenses/retries-spi-2.30.31.jar.sha1 | 1 + .../licenses/retries-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/retries-spi-NOTICE.txt | 14 ++ .../licenses/sdk-core-2.20.86.jar.sha1 | 1 - .../licenses/sdk-core-2.30.31.jar.sha1 | 1 + .../third-party-jackson-core-2.20.86.jar.sha1 | 1 - .../third-party-jackson-core-2.30.31.jar.sha1 | 1 + .../licenses/utils-2.20.86.jar.sha1 | 1 - .../licenses/utils-2.30.31.jar.sha1 | 1 + .../crypto/kms/KmsServiceTests.java | 17 +- plugins/discovery-ec2/build.gradle | 13 +- .../licenses/annotations-2.20.86.jar.sha1 | 1 - .../licenses/annotations-2.30.31.jar.sha1 | 1 + .../licenses/apache-client-2.20.86.jar.sha1 | 1 - .../licenses/apache-client-2.30.31.jar.sha1 | 1 + .../licenses/auth-2.20.86.jar.sha1 | 1 - .../licenses/auth-2.30.31.jar.sha1 | 1 + .../licenses/aws-core-2.20.86.jar.sha1 | 1 - .../licenses/aws-core-2.30.31.jar.sha1 | 1 + .../licenses/aws-crt-0.35.0.jar.sha1 | 1 + .../licenses/aws-crt-LICENSE.txt | 202 ++++++++++++++++++ .../discovery-ec2/licenses/aws-crt-NOTICE.txt | 14 ++ .../aws-json-protocol-2.20.86.jar.sha1 | 1 - .../aws-json-protocol-2.30.31.jar.sha1 | 1 + .../aws-query-protocol-2.20.86.jar.sha1 | 1 - .../aws-query-protocol-2.30.31.jar.sha1 | 1 + .../licenses/checksums-2.30.31.jar.sha1 | 1 + .../licenses/checksums-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/checksums-NOTICE.txt | 14 ++ .../licenses/checksums-spi-2.30.31.jar.sha1 | 1 + .../licenses/checksums-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/checksums-spi-NOTICE.txt | 14 ++ .../licenses/ec2-2.20.86.jar.sha1 | 1 - .../licenses/ec2-2.30.31.jar.sha1 | 1 + .../licenses/endpoints-spi-2.20.86.jar.sha1 | 1 - .../licenses/endpoints-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-NOTICE.txt | 14 ++ .../licenses/http-auth-aws-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-aws-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-aws-NOTICE.txt | 14 ++ .../licenses/http-auth-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-spi-NOTICE.txt | 14 ++ .../licenses/http-client-spi-2.20.86.jar.sha1 | 1 - .../licenses/http-client-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/identity-spi-NOTICE.txt | 14 ++ .../licenses/json-utils-2.20.86.jar.sha1 | 1 - .../licenses/json-utils-2.30.31.jar.sha1 | 1 + .../licenses/metrics-spi-2.20.86.jar.sha1 | 1 - .../licenses/metrics-spi-2.30.31.jar.sha1 | 1 + .../licenses/profiles-2.20.86.jar.sha1 | 1 - .../licenses/profiles-2.30.31.jar.sha1 | 1 + .../licenses/protocol-core-2.20.86.jar.sha1 | 1 - .../licenses/protocol-core-2.30.31.jar.sha1 | 1 + .../licenses/regions-2.20.86.jar.sha1 | 1 - .../licenses/regions-2.30.31.jar.sha1 | 1 + .../licenses/retries-2.30.31.jar.sha1 | 1 + .../licenses/retries-LICENSE.txt | 202 ++++++++++++++++++ .../discovery-ec2/licenses/retries-NOTICE.txt | 14 ++ .../licenses/retries-spi-2.30.31.jar.sha1 | 1 + 
.../licenses/retries-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/retries-spi-NOTICE.txt | 14 ++ .../licenses/sdk-core-2.20.86.jar.sha1 | 1 - .../licenses/sdk-core-2.30.31.jar.sha1 | 1 + .../third-party-jackson-core-2.20.86.jar.sha1 | 1 - .../third-party-jackson-core-2.30.31.jar.sha1 | 1 + .../licenses/utils-2.20.86.jar.sha1 | 1 - .../licenses/utils-2.30.31.jar.sha1 | 1 + .../discovery/ec2/AwsEc2ServiceImplTests.java | 2 +- .../ec2/Ec2DiscoveryPluginTests.java | 15 +- plugins/repository-s3/build.gradle | 38 ++-- .../licenses/annotations-2.20.86.jar.sha1 | 1 - .../licenses/annotations-2.30.31.jar.sha1 | 1 + .../licenses/apache-client-2.20.86.jar.sha1 | 1 - .../licenses/apache-client-2.30.31.jar.sha1 | 1 + .../licenses/auth-2.20.86.jar.sha1 | 1 - .../licenses/auth-2.30.31.jar.sha1 | 1 + .../licenses/aws-core-2.20.86.jar.sha1 | 1 - .../licenses/aws-core-2.30.31.jar.sha1 | 1 + .../licenses/aws-crt-0.35.0.jar.sha1 | 1 + .../licenses/aws-crt-LICENSE.txt | 202 ++++++++++++++++++ .../repository-s3/licenses/aws-crt-NOTICE.txt | 14 ++ .../aws-json-protocol-2.20.86.jar.sha1 | 1 - .../aws-json-protocol-2.30.31.jar.sha1 | 1 + .../aws-query-protocol-2.20.86.jar.sha1 | 1 - .../aws-query-protocol-2.30.31.jar.sha1 | 1 + .../aws-xml-protocol-2.20.86.jar.sha1 | 1 - .../aws-xml-protocol-2.30.31.jar.sha1 | 1 + .../licenses/checksums-2.30.31.jar.sha1 | 1 + .../licenses/checksums-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/checksums-NOTICE.txt | 14 ++ .../licenses/checksums-spi-2.30.31.jar.sha1 | 1 + .../licenses/checksums-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/checksums-spi-NOTICE.txt | 14 ++ .../licenses/endpoints-spi-2.20.86.jar.sha1 | 1 - .../licenses/endpoints-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-NOTICE.txt | 14 ++ .../licenses/http-auth-aws-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-aws-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-aws-NOTICE.txt | 14 ++ .../licenses/http-auth-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-spi-NOTICE.txt | 14 ++ .../licenses/http-client-spi-2.20.86.jar.sha1 | 1 - .../licenses/http-client-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/identity-spi-NOTICE.txt | 14 ++ .../licenses/json-utils-2.20.86.jar.sha1 | 1 - .../licenses/json-utils-2.30.31.jar.sha1 | 1 + .../licenses/metrics-spi-2.20.86.jar.sha1 | 1 - .../licenses/metrics-spi-2.30.31.jar.sha1 | 1 + .../netty-nio-client-2.20.86.jar.sha1 | 1 - .../netty-nio-client-2.30.31.jar.sha1 | 1 + .../licenses/profiles-2.20.86.jar.sha1 | 1 - .../licenses/profiles-2.30.31.jar.sha1 | 1 + .../licenses/protocol-core-2.20.86.jar.sha1 | 1 - .../licenses/protocol-core-2.30.31.jar.sha1 | 1 + .../licenses/regions-2.20.86.jar.sha1 | 1 - .../licenses/regions-2.30.31.jar.sha1 | 1 + .../licenses/retries-2.30.31.jar.sha1 | 1 + .../licenses/retries-LICENSE.txt | 202 ++++++++++++++++++ .../repository-s3/licenses/retries-NOTICE.txt | 14 ++ .../licenses/retries-spi-2.30.31.jar.sha1 | 1 + .../licenses/retries-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/retries-spi-NOTICE.txt | 14 ++ .../licenses/s3-2.20.86.jar.sha1 | 1 - .../licenses/s3-2.30.31.jar.sha1 | 1 + .../licenses/sdk-core-2.20.86.jar.sha1 | 1 - .../licenses/sdk-core-2.30.31.jar.sha1 | 1 + .../licenses/signer-2.20.86.jar.sha1 | 1 - 
.../licenses/signer-2.30.31.jar.sha1 | 1 + .../licenses/sts-2.20.86.jar.sha1 | 1 - .../licenses/sts-2.30.31.jar.sha1 | 1 + .../third-party-jackson-core-2.20.86.jar.sha1 | 1 - .../third-party-jackson-core-2.30.31.jar.sha1 | 1 + .../licenses/utils-2.20.86.jar.sha1 | 1 - .../licenses/utils-2.30.31.jar.sha1 | 1 + 181 files changed, 4650 insertions(+), 109 deletions(-) delete mode 100644 plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/annotations-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/apache-client-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/auth-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/aws-core-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/aws-json-protocol-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/aws-query-protocol-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/endpoints-spi-2.30.31.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/http-auth-spi-2.30.31.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/http-auth-spi-LICENSE.txt create mode 100644 plugins/crypto-kms/licenses/http-auth-spi-NOTICE.txt delete mode 100644 plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/http-client-spi-2.30.31.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/identity-spi-2.30.31.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/identity-spi-LICENSE.txt create mode 100644 plugins/crypto-kms/licenses/identity-spi-NOTICE.txt delete mode 100644 plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/json-utils-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/kms-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/metrics-spi-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/profiles-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/protocol-core-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/regions-2.30.31.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/retries-spi-2.30.31.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/retries-spi-LICENSE.txt create mode 100644 plugins/crypto-kms/licenses/retries-spi-NOTICE.txt delete mode 100644 plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/sdk-core-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/third-party-jackson-core-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 create mode 
100644 plugins/crypto-kms/licenses/utils-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/annotations-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/apache-client-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/auth-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-core-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-crt-0.35.0.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-crt-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/aws-crt-NOTICE.txt delete mode 100644 plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-json-protocol-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-query-protocol-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/checksums-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/checksums-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/checksums-NOTICE.txt create mode 100644 plugins/discovery-ec2/licenses/checksums-spi-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/checksums-spi-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/checksums-spi-NOTICE.txt delete mode 100644 plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/ec2-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/endpoints-spi-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/http-auth-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/http-auth-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/http-auth-NOTICE.txt create mode 100644 plugins/discovery-ec2/licenses/http-auth-aws-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/http-auth-aws-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/http-auth-aws-NOTICE.txt create mode 100644 plugins/discovery-ec2/licenses/http-auth-spi-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/http-auth-spi-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/http-auth-spi-NOTICE.txt delete mode 100644 plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/http-client-spi-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/identity-spi-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/identity-spi-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/identity-spi-NOTICE.txt delete mode 100644 plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/json-utils-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/metrics-spi-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/profiles-2.30.31.jar.sha1 delete mode 100644 
plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/protocol-core-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/regions-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/retries-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/retries-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/retries-NOTICE.txt create mode 100644 plugins/discovery-ec2/licenses/retries-spi-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/retries-spi-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/retries-spi-NOTICE.txt delete mode 100644 plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/sdk-core-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/third-party-jackson-core-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/utils-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/annotations-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/apache-client-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/auth-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-core-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-crt-0.35.0.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-crt-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/aws-crt-NOTICE.txt delete mode 100644 plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-json-protocol-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-query-protocol-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-xml-protocol-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/checksums-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/checksums-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/checksums-NOTICE.txt create mode 100644 plugins/repository-s3/licenses/checksums-spi-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/checksums-spi-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/checksums-spi-NOTICE.txt delete mode 100644 plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/endpoints-spi-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/http-auth-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/http-auth-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/http-auth-NOTICE.txt create mode 100644 plugins/repository-s3/licenses/http-auth-aws-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/http-auth-aws-LICENSE.txt create 
mode 100644 plugins/repository-s3/licenses/http-auth-aws-NOTICE.txt create mode 100644 plugins/repository-s3/licenses/http-auth-spi-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/http-auth-spi-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/http-auth-spi-NOTICE.txt delete mode 100644 plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/http-client-spi-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/identity-spi-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/identity-spi-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/identity-spi-NOTICE.txt delete mode 100644 plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/json-utils-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/metrics-spi-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-nio-client-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/profiles-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/protocol-core-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/regions-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/retries-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/retries-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/retries-NOTICE.txt create mode 100644 plugins/repository-s3/licenses/retries-spi-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/retries-spi-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/retries-spi-NOTICE.txt delete mode 100644 plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/s3-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/sdk-core-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/signer-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/sts-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/third-party-jackson-core-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/utils-2.30.31.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 45fd4813e72da..091e832e01cd1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.api.grpc:proto-google-common-protos` from 2.37.1 to 2.52.0 ([#17379](https://github.com/opensearch-project/OpenSearch/pull/17379)) - Bump `net.minidev:json-smart` from 2.5.1 to 2.5.2 ([#17378](https://github.com/opensearch-project/OpenSearch/pull/17378)) - Bump `com.netflix.nebula.ospackage-base` from 11.10.1 to 11.11.1 
([#17374](https://github.com/opensearch-project/OpenSearch/pull/17374)) +- Bump `software.amazon.awssdk` from 2.20.86 to 2.30.31 ([#17396](https://github.com/opensearch-project/OpenSearch/pull/17396)) ### Changed - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index abdd87394b35c..90518ca71ec53 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -52,7 +52,8 @@ commonslang = "3.14.0" commonscompress = "1.26.1" commonsio = "2.16.0" # plugin dependencies -aws = "2.20.86" +aws = "2.30.31" +awscrt = "0.35.0" reactivestreams = "1.0.4" # when updating this version, you need to ensure compatibility with: diff --git a/plugins/crypto-kms/build.gradle b/plugins/crypto-kms/build.gradle index fa63a4a7153d3..d66c731dc16af 100644 --- a/plugins/crypto-kms/build.gradle +++ b/plugins/crypto-kms/build.gradle @@ -30,10 +30,13 @@ dependencies { api "software.amazon.awssdk:aws-core:${versions.aws}" api "software.amazon.awssdk:utils:${versions.aws}" api "software.amazon.awssdk:auth:${versions.aws}" + api "software.amazon.awssdk:identity-spi:${versions.aws}" api "software.amazon.awssdk:kms:${versions.aws}" + api "software.amazon.awssdk:http-auth-spi:${versions.aws}" api "software.amazon.awssdk:http-client-spi:${versions.aws}" api "software.amazon.awssdk:apache-client:${versions.aws}" api "software.amazon.awssdk:regions:${versions.aws}" + api "software.amazon.awssdk:retries-spi:${versions.aws}" api "software.amazon.awssdk:profiles:${versions.aws}" api "software.amazon.awssdk:endpoints-spi:${versions.aws}" api "software.amazon.awssdk:annotations:${versions.aws}" diff --git a/plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 deleted file mode 100644 index 8d30ad649916b..0000000000000 --- a/plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e785e9ecb1230e52e9daa713335f38809ddcb74 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/annotations-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/annotations-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d45f8758c9405 --- /dev/null +++ b/plugins/crypto-kms/licenses/annotations-2.30.31.jar.sha1 @@ -0,0 +1 @@ +c5acc1da9567290302d80ffa1633785afa4ce630 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 deleted file mode 100644 index e7ae36581925c..0000000000000 --- a/plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af31c4d3abec23b73061c6965364a6e3abbcc01a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/apache-client-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/apache-client-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..97331cbda2c1b --- /dev/null +++ b/plugins/crypto-kms/licenses/apache-client-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d1c602dba702782a0afec0a08c919322693a3bf8 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 deleted file mode 100644 index e4c1b29cea894..0000000000000 --- a/plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2da82f33776ce4814a3ab53b5ccb82a5d135936 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/auth-2.30.31.jar.sha1
b/plugins/crypto-kms/licenses/auth-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..c1e199ca02fc8 --- /dev/null +++ b/plugins/crypto-kms/licenses/auth-2.30.31.jar.sha1 @@ -0,0 +1 @@ +8887962b04ce5f1a9f46d44acd806949b17082da \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 deleted file mode 100644 index d42a15c4da413..0000000000000 --- a/plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea126147c3d17a8b3075e3122ec9c2b94fe1f6d5 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-core-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/aws-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..16050fd1d8c6d --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5016fadbd7146171b4afe09eb0675b710b0f2d12 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 deleted file mode 100644 index ee08d240fbfba..0000000000000 --- a/plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b9d09c1aa9d3f2119267f0b6549ae1810512c7b \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-json-protocol-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/aws-json-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..bfc742d8687d1 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-json-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4600659276f84e114c1fabeb1478911c581a7739 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 deleted file mode 100644 index 9b19f570d56fb..0000000000000 --- a/plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e001792ec1a681f5bc6ee4157d572173416304ad \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-query-protocol-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/aws-query-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9508295147c96 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-query-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +61596c0cb577a4a6c438a5a7ee0391d2d825b3fe \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 deleted file mode 100644 index 16f9db1fd6327..0000000000000 --- a/plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b9075dd0ed32da97f95229f55c01425353e8cba \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/endpoints-spi-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/endpoints-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4dbc884c3da6f --- /dev/null +++ b/plugins/crypto-kms/licenses/endpoints-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +0734f4b9c68f19201896dd47639035b4e0a7964d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/http-auth-spi-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/http-auth-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..491ffe4dd0584 --- /dev/null +++ b/plugins/crypto-kms/licenses/http-auth-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +513519f79635441d5205fc31d56c2e0d5826d27f \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/http-auth-spi-LICENSE.txt 
b/plugins/crypto-kms/licenses/http-auth-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/http-auth-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/http-auth-spi-NOTICE.txt b/plugins/crypto-kms/licenses/http-auth-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/http-auth-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 deleted file mode 100644 index 0662e15b1f3e6..0000000000000 --- a/plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -776bfc86fabc6e8c792ea4650a281d0bec5e9708 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/http-client-spi-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/http-client-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d86fa139f535c --- /dev/null +++ b/plugins/crypto-kms/licenses/http-client-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5fa894c333793b7481aa03aa87512b20e11b057d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/identity-spi-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/identity-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9eeab9ad13dba --- /dev/null +++ b/plugins/crypto-kms/licenses/identity-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +46da74ac074b176c25fba07c6541737422622c1d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/identity-spi-LICENSE.txt b/plugins/crypto-kms/licenses/identity-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/identity-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/identity-spi-NOTICE.txt b/plugins/crypto-kms/licenses/identity-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/identity-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 deleted file mode 100644 index 7011f8c3e6c78..0000000000000 --- a/plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5dd418ad48e3bfd8c3fa05ff29a955b91c1af666 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/json-utils-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/json-utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..5019f6d48fa0a --- /dev/null +++ b/plugins/crypto-kms/licenses/json-utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7f0ef4b49299df2fd39f92113d94524729c61032 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 deleted file mode 100644 index 32c4e9f432898..0000000000000 --- a/plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6a81c2f14acaa7b9dcdc80c715d6e44d815a818a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/kms-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/kms-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..becd3d624ef17 --- /dev/null +++ b/plugins/crypto-kms/licenses/kms-2.30.31.jar.sha1 @@ -0,0 +1 @@ +0bb8a87a83edf1eb0c4dddb2afb1158ac858626d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 deleted file mode 100644 index bbd88bb9e1b0c..0000000000000 --- a/plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74a65d0f8decd0b3057fb500ca5409ff5778752a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/metrics-spi-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/metrics-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..69ab3ec6f79ff --- /dev/null +++ b/plugins/crypto-kms/licenses/metrics-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +57a979cbc99d0bf4113d96aaf4f453303a015966 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 deleted file mode 100644 index 425ce9b92f9f2..0000000000000 --- a/plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -27a8f4aa488d1d3ef947865ee0190f16d10a3cc7 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/profiles-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/profiles-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..6d4d2a1ac8d65 --- /dev/null +++ b/plugins/crypto-kms/licenses/profiles-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d6d2d5788695972140dfe8b012ea7ccd97b82eef \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 deleted file mode 100644 index 8de58699d8d82..0000000000000 
--- a/plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bd85984ac6327a50d20e7957ecebf4fa3ad7766b \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/protocol-core-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/protocol-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..caae2a4302976 --- /dev/null +++ b/plugins/crypto-kms/licenses/protocol-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +ee17b25525aee497b6d520c8e499f39de7204fbc \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 deleted file mode 100644 index 266bc76ad6f77..0000000000000 --- a/plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04fd460ce1c633986ecef1b4218d3e7067a7087d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/regions-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/regions-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..8e9876686a144 --- /dev/null +++ b/plugins/crypto-kms/licenses/regions-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7ce1df66496dcf9b124edb78ab9675e1e7d5c427 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/retries-spi-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/retries-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..854e3d7e4aebf --- /dev/null +++ b/plugins/crypto-kms/licenses/retries-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4d9166189594243f88045fbf0c871a81e3914c0b \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/retries-spi-LICENSE.txt b/plugins/crypto-kms/licenses/retries-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/retries-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/retries-spi-NOTICE.txt b/plugins/crypto-kms/licenses/retries-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/retries-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 deleted file mode 100644 index 9eca40e6b9a9a..0000000000000 --- a/plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b9df9ca5e4918fab05db3b703b2873e83104c30 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/sdk-core-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/sdk-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..ee3d7e3bff68d --- /dev/null +++ b/plugins/crypto-kms/licenses/sdk-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b95c07d4796105c2e61c4c6ab60e3189886b2787 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 deleted file mode 100644 index c9c3d4dc53505..0000000000000 --- a/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ad6e7f7d52d8a5390b2daf2fd8ffcab97fe3102 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/third-party-jackson-core-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/third-party-jackson-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..a07a8eda62447 --- /dev/null +++ b/plugins/crypto-kms/licenses/third-party-jackson-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +100d8022939bd59cd7d2461bd4fb0fd9fa028499 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 deleted file mode 100644 index b91a3b3047570..0000000000000 --- a/plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a61f8b3c54ecf3dc785830d4f482f19ca52bc57 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/utils-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..184ff1cc5f9ce --- /dev/null +++ b/plugins/crypto-kms/licenses/utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +3340adacb87ff28f90a039d57c81311b296db89e \ No newline at end of file diff --git a/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java index 1424cce473592..8d63d1c0eccd7 100644 --- a/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java +++ b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java @@ -31,7 +31,7 @@ public void testAWSDefaultConfiguration() { KmsClientSettings.getClientSettings(Settings.EMPTY) ); - assertNull(proxyConfiguration.scheme()); + assertEquals("http", proxyConfiguration.scheme()); assertNull(proxyConfiguration.host()); assertEquals(proxyConfiguration.port(), 0); assertNull(proxyConfiguration.username()); @@ -131,14 +131,11 @@ public void testClientSettingsReInit() { assertTrue(credentials instanceof AwsBasicCredentials); } - assertEquals( - mockKmsClientTest.proxyConfiguration.toString(), - "ProxyConfiguration(endpoint=https://proxy-host-1:881, username=proxy_username_1, preemptiveBasicAuthenticationEnabled=false)" - ); assertEquals(mockKmsClientTest.proxyConfiguration.host(), "proxy-host-1"); assertEquals(mockKmsClientTest.proxyConfiguration.port(), 881); assertEquals(mockKmsClientTest.proxyConfiguration.username(), "proxy_username_1"); assertEquals(mockKmsClientTest.proxyConfiguration.password(), "proxy_password_1"); + 
assertFalse(mockKmsClientTest.proxyConfiguration.preemptiveBasicAuthenticationEnabled()); } // reload secure settings2 plugin.reload(settings2); @@ -155,14 +152,11 @@ public void testClientSettingsReInit() { assertTrue(credentials instanceof AwsBasicCredentials); } - assertEquals( - mockKmsClientTest.proxyConfiguration.toString(), - "ProxyConfiguration(endpoint=https://proxy-host-1:881, username=proxy_username_1, preemptiveBasicAuthenticationEnabled=false)" - ); assertEquals(mockKmsClientTest.proxyConfiguration.host(), "proxy-host-1"); assertEquals(mockKmsClientTest.proxyConfiguration.port(), 881); assertEquals(mockKmsClientTest.proxyConfiguration.username(), "proxy_username_1"); assertEquals(mockKmsClientTest.proxyConfiguration.password(), "proxy_password_1"); + assertFalse(mockKmsClientTest.proxyConfiguration.preemptiveBasicAuthenticationEnabled()); } } try (AmazonKmsClientReference clientReference = plugin.kmsService.client(cryptoMetadata)) { @@ -179,14 +173,11 @@ public void testClientSettingsReInit() { assertTrue(credentials instanceof AwsBasicCredentials); } - assertEquals( - mockKmsClientTest.proxyConfiguration.toString(), - "ProxyConfiguration(endpoint=https://proxy-host-2:882, username=proxy_username_2, preemptiveBasicAuthenticationEnabled=false)" - ); assertEquals(mockKmsClientTest.proxyConfiguration.host(), "proxy-host-2"); assertEquals(mockKmsClientTest.proxyConfiguration.port(), 882); assertEquals(mockKmsClientTest.proxyConfiguration.username(), "proxy_username_2"); assertEquals(mockKmsClientTest.proxyConfiguration.password(), "proxy_password_2"); + assertFalse(mockKmsClientTest.proxyConfiguration.preemptiveBasicAuthenticationEnabled()); } } } diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 8d615e0bf8d9d..7a7eb8da24fb6 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -43,6 +43,15 @@ dependencies { api "software.amazon.awssdk:aws-core:${versions.aws}" api "software.amazon.awssdk:utils:${versions.aws}" api "software.amazon.awssdk:auth:${versions.aws}" + api "software.amazon.awssdk:identity-spi:${versions.aws}" + api "software.amazon.awssdk:checksums:${versions.aws}" + api "software.amazon.awssdk:checksums-spi:${versions.aws}" + api "software.amazon.awssdk.crt:aws-crt:${versions.awscrt}" + api "software.amazon.awssdk:http-auth:${versions.aws}" + api "software.amazon.awssdk:http-auth-aws:${versions.aws}" + api "software.amazon.awssdk:http-auth-spi:${versions.aws}" + api "software.amazon.awssdk:retries:${versions.aws}" + api "software.amazon.awssdk:retries-spi:${versions.aws}" api "software.amazon.awssdk:ec2:${versions.aws}" api "software.amazon.awssdk:http-client-spi:${versions.aws}" api "software.amazon.awssdk:apache-client:${versions.aws}" @@ -158,6 +167,8 @@ tasks.named("thirdPartyAudit").configure { 'org.slf4j.impl.StaticMarkerBinder', 'software.amazon.eventstream.HeaderValue', 'software.amazon.eventstream.Message', - 'software.amazon.eventstream.MessageDecoder' + 'software.amazon.eventstream.MessageDecoder', + 'org.graalvm.nativeimage.hosted.Feature', + 'org.graalvm.nativeimage.hosted.Feature$AfterImageWriteAccess' ) } diff --git a/plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 deleted file mode 100644 index 8d30ad649916b..0000000000000 --- a/plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e785e9ecb1230e52e9daa713335f38809ddcb74 \ No newline at end of file diff --git 
a/plugins/discovery-ec2/licenses/annotations-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/annotations-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d45f8758c9405 --- /dev/null +++ b/plugins/discovery-ec2/licenses/annotations-2.30.31.jar.sha1 @@ -0,0 +1 @@ +c5acc1da9567290302d80ffa1633785afa4ce630 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 deleted file mode 100644 index e7ae36581925c..0000000000000 --- a/plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af31c4d3abec23b73061c6965364a6e3abbcc01a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/apache-client-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/apache-client-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..97331cbda2c1b --- /dev/null +++ b/plugins/discovery-ec2/licenses/apache-client-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d1c602dba702782a0afec0a08c919322693a3bf8 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 deleted file mode 100644 index e4c1b29cea894..0000000000000 --- a/plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2da82f33776ce4814a3ab53b5ccb82a5d135936 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/auth-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/auth-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..c1e199ca02fc8 --- /dev/null +++ b/plugins/discovery-ec2/licenses/auth-2.30.31.jar.sha1 @@ -0,0 +1 @@ +8887962b04ce5f1a9f46d44acd806949b17082da \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 deleted file mode 100644 index d42a15c4da413..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea126147c3d17a8b3075e3122ec9c2b94fe1f6d5 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-core-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/aws-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..16050fd1d8c6d --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5016fadbd7146171b4afe09eb0675b710b0f2d12 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-crt-0.35.0.jar.sha1 b/plugins/discovery-ec2/licenses/aws-crt-0.35.0.jar.sha1 new file mode 100644 index 0000000000000..1097f5bb4d814 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-crt-0.35.0.jar.sha1 @@ -0,0 +1 @@ +33041403e1a9dd94f40330206eda5ffc22ee185c \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-crt-LICENSE.txt b/plugins/discovery-ec2/licenses/aws-crt-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-crt-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/discovery-ec2/licenses/aws-crt-NOTICE.txt b/plugins/discovery-ec2/licenses/aws-crt-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-crt-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 deleted file mode 100644 index ee08d240fbfba..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b9d09c1aa9d3f2119267f0b6549ae1810512c7b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-json-protocol-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/aws-json-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..bfc742d8687d1 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-json-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4600659276f84e114c1fabeb1478911c581a7739 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 deleted file mode 100644 index 9b19f570d56fb..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e001792ec1a681f5bc6ee4157d572173416304ad \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-query-protocol-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/aws-query-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9508295147c96 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-query-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +61596c0cb577a4a6c438a5a7ee0391d2d825b3fe \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/checksums-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/checksums-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4447b86f6e872 --- /dev/null +++ b/plugins/discovery-ec2/licenses/checksums-2.30.31.jar.sha1 @@ -0,0 +1 @@ +6d00287bc0ceb013dd5c74f1c4eb296ae61b34d4 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/checksums-LICENSE.txt b/plugins/discovery-ec2/licenses/checksums-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/checksums-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/discovery-ec2/licenses/checksums-NOTICE.txt b/plugins/discovery-ec2/licenses/checksums-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/checksums-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/discovery-ec2/licenses/checksums-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/checksums-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..078cab150c5ad --- /dev/null +++ b/plugins/discovery-ec2/licenses/checksums-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b5a5b0a39403acf41c21fd16cd11c7c8d887601b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/checksums-spi-LICENSE.txt b/plugins/discovery-ec2/licenses/checksums-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/checksums-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-ec2/licenses/checksums-spi-NOTICE.txt b/plugins/discovery-ec2/licenses/checksums-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/checksums-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 deleted file mode 100644 index 18c43cfc7516d..0000000000000 --- a/plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3522a0829622a9c80152e6e2528bb79166f0b709 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/ec2-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/ec2-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..e5982e9b99aa7 --- /dev/null +++ b/plugins/discovery-ec2/licenses/ec2-2.30.31.jar.sha1 @@ -0,0 +1 @@ +e1df5c01dc20de548b572d4bcfd75bba360411f2 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 deleted file mode 100644 index 16f9db1fd6327..0000000000000 --- a/plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b9075dd0ed32da97f95229f55c01425353e8cba \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/endpoints-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/endpoints-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4dbc884c3da6f --- /dev/null +++ b/plugins/discovery-ec2/licenses/endpoints-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +0734f4b9c68f19201896dd47639035b4e0a7964d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-auth-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/http-auth-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..79893fb4fbf58 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b7baeb158b0af0e400d89a32595c9127db2bbb6e \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-auth-LICENSE.txt b/plugins/discovery-ec2/licenses/http-auth-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-ec2/licenses/http-auth-NOTICE.txt b/plugins/discovery-ec2/licenses/http-auth-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/discovery-ec2/licenses/http-auth-aws-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/http-auth-aws-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d190c6ca52e98 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-aws-2.30.31.jar.sha1 @@ -0,0 +1 @@ +f2a7d383158746c82b0f41b021e0da23a2597b35 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-auth-aws-LICENSE.txt b/plugins/discovery-ec2/licenses/http-auth-aws-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-aws-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-ec2/licenses/http-auth-aws-NOTICE.txt b/plugins/discovery-ec2/licenses/http-auth-aws-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-aws-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/discovery-ec2/licenses/http-auth-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/http-auth-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..491ffe4dd0584 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +513519f79635441d5205fc31d56c2e0d5826d27f \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-auth-spi-LICENSE.txt b/plugins/discovery-ec2/licenses/http-auth-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/discovery-ec2/licenses/http-auth-spi-NOTICE.txt b/plugins/discovery-ec2/licenses/http-auth-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 deleted file mode 100644 index 0662e15b1f3e6..0000000000000 --- a/plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -776bfc86fabc6e8c792ea4650a281d0bec5e9708 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-client-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/http-client-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d86fa139f535c --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-client-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5fa894c333793b7481aa03aa87512b20e11b057d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/identity-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/identity-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9eeab9ad13dba --- /dev/null +++ b/plugins/discovery-ec2/licenses/identity-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +46da74ac074b176c25fba07c6541737422622c1d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/identity-spi-LICENSE.txt b/plugins/discovery-ec2/licenses/identity-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/identity-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-ec2/licenses/identity-spi-NOTICE.txt b/plugins/discovery-ec2/licenses/identity-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/identity-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 deleted file mode 100644 index 7011f8c3e6c78..0000000000000 --- a/plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5dd418ad48e3bfd8c3fa05ff29a955b91c1af666 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/json-utils-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/json-utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..5019f6d48fa0a --- /dev/null +++ b/plugins/discovery-ec2/licenses/json-utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7f0ef4b49299df2fd39f92113d94524729c61032 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 deleted file mode 100644 index bbd88bb9e1b0c..0000000000000 --- a/plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74a65d0f8decd0b3057fb500ca5409ff5778752a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/metrics-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/metrics-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..69ab3ec6f79ff --- /dev/null +++ b/plugins/discovery-ec2/licenses/metrics-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +57a979cbc99d0bf4113d96aaf4f453303a015966 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 deleted file mode 100644 index 425ce9b92f9f2..0000000000000 --- a/plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -27a8f4aa488d1d3ef947865ee0190f16d10a3cc7 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/profiles-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/profiles-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..6d4d2a1ac8d65 --- /dev/null +++ b/plugins/discovery-ec2/licenses/profiles-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d6d2d5788695972140dfe8b012ea7ccd97b82eef \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 deleted file mode 100644 index 8de58699d8d82..0000000000000 --- a/plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bd85984ac6327a50d20e7957ecebf4fa3ad7766b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/protocol-core-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/protocol-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..caae2a4302976 --- /dev/null +++ b/plugins/discovery-ec2/licenses/protocol-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +ee17b25525aee497b6d520c8e499f39de7204fbc \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 deleted file mode 100644 index 266bc76ad6f77..0000000000000 --- a/plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04fd460ce1c633986ecef1b4218d3e7067a7087d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/regions-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/regions-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..8e9876686a144 --- /dev/null +++ b/plugins/discovery-ec2/licenses/regions-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7ce1df66496dcf9b124edb78ab9675e1e7d5c427 \ No newline at end of file diff --git 
a/plugins/discovery-ec2/licenses/retries-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/retries-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..98b46e3439ac7 --- /dev/null +++ b/plugins/discovery-ec2/licenses/retries-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b490f67c9d3f000ae40928d9aa3c9debceac0966 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/retries-LICENSE.txt b/plugins/discovery-ec2/licenses/retries-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/retries-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-ec2/licenses/retries-NOTICE.txt b/plugins/discovery-ec2/licenses/retries-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/retries-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/discovery-ec2/licenses/retries-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/retries-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..854e3d7e4aebf --- /dev/null +++ b/plugins/discovery-ec2/licenses/retries-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4d9166189594243f88045fbf0c871a81e3914c0b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/retries-spi-LICENSE.txt b/plugins/discovery-ec2/licenses/retries-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/retries-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/discovery-ec2/licenses/retries-spi-NOTICE.txt b/plugins/discovery-ec2/licenses/retries-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/retries-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 deleted file mode 100644 index 9eca40e6b9a9a..0000000000000 --- a/plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b9df9ca5e4918fab05db3b703b2873e83104c30 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/sdk-core-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/sdk-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..ee3d7e3bff68d --- /dev/null +++ b/plugins/discovery-ec2/licenses/sdk-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b95c07d4796105c2e61c4c6ab60e3189886b2787 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 deleted file mode 100644 index c9c3d4dc53505..0000000000000 --- a/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ad6e7f7d52d8a5390b2daf2fd8ffcab97fe3102 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/third-party-jackson-core-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/third-party-jackson-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..a07a8eda62447 --- /dev/null +++ b/plugins/discovery-ec2/licenses/third-party-jackson-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +100d8022939bd59cd7d2461bd4fb0fd9fa028499 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 deleted file mode 100644 index b91a3b3047570..0000000000000 --- a/plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a61f8b3c54ecf3dc785830d4f482f19ca52bc57 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/utils-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..184ff1cc5f9ce --- /dev/null +++ b/plugins/discovery-ec2/licenses/utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +3340adacb87ff28f90a039d57c81311b296db89e \ No newline at end of file diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java index 3164abe456515..d5f1a5e2d0e45 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java @@ -131,7 +131,7 @@ public void testAWSDefaultConfiguration() { Ec2ClientSettings.getClientSettings(Settings.EMPTY) ); - assertNull(proxyConfiguration.scheme()); + assertEquals("http", proxyConfiguration.scheme()); assertNull(proxyConfiguration.host()); assertThat(proxyConfiguration.port(), 
is(0)); assertNull(proxyConfiguration.username()); diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java index bde508a0afe96..40c7ba4fc53d7 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -180,14 +180,11 @@ public void testClientSettingsReInit() throws IOException { assertThat(credentials, instanceOf(AwsBasicCredentials.class)); } - assertEquals( - mockEc2Client.proxyConfiguration.toString(), - "ProxyConfiguration(endpoint=https://proxy-host-1:881, username=proxy_username_1, preemptiveBasicAuthenticationEnabled=false)" - ); assertEquals(mockEc2Client.proxyConfiguration.host(), "proxy-host-1"); assertEquals(mockEc2Client.proxyConfiguration.port(), 881); assertEquals(mockEc2Client.proxyConfiguration.username(), "proxy_username_1"); assertEquals(mockEc2Client.proxyConfiguration.password(), "proxy_password_1"); + assertFalse(mockEc2Client.proxyConfiguration.preemptiveBasicAuthenticationEnabled()); } // reload secure settings2 plugin.reload(settings2); @@ -204,14 +201,11 @@ public void testClientSettingsReInit() throws IOException { assertThat(credentials, instanceOf(AwsBasicCredentials.class)); } - assertEquals( - mockEc2Client.proxyConfiguration.toString(), - "ProxyConfiguration(endpoint=https://proxy-host-1:881, username=proxy_username_1, preemptiveBasicAuthenticationEnabled=false)" - ); assertEquals(mockEc2Client.proxyConfiguration.host(), "proxy-host-1"); assertEquals(mockEc2Client.proxyConfiguration.port(), 881); assertEquals(mockEc2Client.proxyConfiguration.username(), "proxy_username_1"); assertEquals(mockEc2Client.proxyConfiguration.password(), "proxy_password_1"); + assertFalse(mockEc2Client.proxyConfiguration.preemptiveBasicAuthenticationEnabled()); } } try (AmazonEc2ClientReference clientReference = plugin.ec2Service.client()) { @@ -228,14 +222,11 @@ public void testClientSettingsReInit() throws IOException { assertThat(credentials, instanceOf(AwsBasicCredentials.class)); } - assertEquals( - mockEc2Client.proxyConfiguration.toString(), - "ProxyConfiguration(endpoint=https://proxy-host-2:882, username=proxy_username_2, preemptiveBasicAuthenticationEnabled=false)" - ); assertEquals(mockEc2Client.proxyConfiguration.host(), "proxy-host-2"); assertEquals(mockEc2Client.proxyConfiguration.port(), 882); assertEquals(mockEc2Client.proxyConfiguration.username(), "proxy_username_2"); assertEquals(mockEc2Client.proxyConfiguration.password(), "proxy_password_2"); + assertFalse(mockEc2Client.proxyConfiguration.preemptiveBasicAuthenticationEnabled()); } } } diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 6e84edddcc252..de9c5420ba034 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -51,6 +51,15 @@ dependencies { api "software.amazon.awssdk:annotations:${versions.aws}" api "software.amazon.awssdk:aws-core:${versions.aws}" api "software.amazon.awssdk:auth:${versions.aws}" + api "software.amazon.awssdk:identity-spi:${versions.aws}" + api "software.amazon.awssdk:checksums:${versions.aws}" + api "software.amazon.awssdk:checksums-spi:${versions.aws}" + api "software.amazon.awssdk.crt:aws-crt:${versions.awscrt}" + api "software.amazon.awssdk:http-auth:${versions.aws}" + api 
"software.amazon.awssdk:http-auth-aws:${versions.aws}" + api "software.amazon.awssdk:http-auth-spi:${versions.aws}" + api "software.amazon.awssdk:retries:${versions.aws}" + api "software.amazon.awssdk:retries-spi:${versions.aws}" api "software.amazon.awssdk:endpoints-spi:${versions.aws}" api "software.amazon.awssdk:http-client-spi:${versions.aws}" api "software.amazon.awssdk:apache-client:${versions.aws}" @@ -517,34 +526,11 @@ thirdPartyAudit { 'reactor.blockhound.BlockHound$Builder', 'reactor.blockhound.integration.BlockHoundIntegration', + 'org.graalvm.nativeimage.hosted.Feature', + 'org.graalvm.nativeimage.hosted.Feature$AfterImageWriteAccess', + 'software.amazon.awssdk.arns.Arn', 'software.amazon.awssdk.arns.ArnResource', - 'software.amazon.awssdk.crt.CRT', - 'software.amazon.awssdk.crt.auth.credentials.Credentials', - 'software.amazon.awssdk.crt.auth.credentials.CredentialsProvider', - 'software.amazon.awssdk.crt.auth.credentials.DelegateCredentialsProvider$DelegateCredentialsProviderBuilder', - 'software.amazon.awssdk.crt.http.HttpHeader', - 'software.amazon.awssdk.crt.http.HttpMonitoringOptions', - 'software.amazon.awssdk.crt.http.HttpProxyOptions', - 'software.amazon.awssdk.crt.http.HttpRequest', - 'software.amazon.awssdk.crt.http.HttpRequestBodyStream', - 'software.amazon.awssdk.crt.io.ClientBootstrap', - 'software.amazon.awssdk.crt.io.ExponentialBackoffRetryOptions', - 'software.amazon.awssdk.crt.io.StandardRetryOptions', - 'software.amazon.awssdk.crt.io.TlsCipherPreference', - 'software.amazon.awssdk.crt.io.TlsContext', - 'software.amazon.awssdk.crt.io.TlsContextOptions', - 'software.amazon.awssdk.crt.s3.ChecksumAlgorithm', - 'software.amazon.awssdk.crt.s3.ChecksumConfig', - 'software.amazon.awssdk.crt.s3.ChecksumConfig$ChecksumLocation', - 'software.amazon.awssdk.crt.s3.ResumeToken', - 'software.amazon.awssdk.crt.s3.S3Client', - 'software.amazon.awssdk.crt.s3.S3ClientOptions', - 'software.amazon.awssdk.crt.s3.S3FinishedResponseContext', - 'software.amazon.awssdk.crt.s3.S3MetaRequest', - 'software.amazon.awssdk.crt.s3.S3MetaRequestOptions', - 'software.amazon.awssdk.crt.s3.S3MetaRequestOptions$MetaRequestType', - 'software.amazon.awssdk.crt.s3.S3MetaRequestResponseHandler', 'software.amazon.awssdk.crtcore.CrtConfigurationUtils', 'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration', 'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration$Builder', diff --git a/plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 deleted file mode 100644 index 8d30ad649916b..0000000000000 --- a/plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e785e9ecb1230e52e9daa713335f38809ddcb74 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/annotations-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/annotations-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d45f8758c9405 --- /dev/null +++ b/plugins/repository-s3/licenses/annotations-2.30.31.jar.sha1 @@ -0,0 +1 @@ +c5acc1da9567290302d80ffa1633785afa4ce630 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 deleted file mode 100644 index e7ae36581925c..0000000000000 --- a/plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af31c4d3abec23b73061c6965364a6e3abbcc01a \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/apache-client-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/apache-client-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..97331cbda2c1b --- /dev/null +++ b/plugins/repository-s3/licenses/apache-client-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d1c602dba702782a0afec0a08c919322693a3bf8 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 deleted file mode 100644 index e4c1b29cea894..0000000000000 --- a/plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2da82f33776ce4814a3ab53b5ccb82a5d135936 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/auth-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/auth-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..c1e199ca02fc8 --- /dev/null +++ b/plugins/repository-s3/licenses/auth-2.30.31.jar.sha1 @@ -0,0 +1 @@ +8887962b04ce5f1a9f46d44acd806949b17082da \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 deleted file mode 100644 index d42a15c4da413..0000000000000 --- a/plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea126147c3d17a8b3075e3122ec9c2b94fe1f6d5 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-core-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/aws-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..16050fd1d8c6d --- /dev/null +++ b/plugins/repository-s3/licenses/aws-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5016fadbd7146171b4afe09eb0675b710b0f2d12 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-crt-0.35.0.jar.sha1 b/plugins/repository-s3/licenses/aws-crt-0.35.0.jar.sha1 new file mode 100644 index 0000000000000..1097f5bb4d814 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-crt-0.35.0.jar.sha1 @@ -0,0 +1 @@ +33041403e1a9dd94f40330206eda5ffc22ee185c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-crt-LICENSE.txt b/plugins/repository-s3/licenses/aws-crt-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-crt-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/aws-crt-NOTICE.txt b/plugins/repository-s3/licenses/aws-crt-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/aws-crt-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 deleted file mode 100644 index ee08d240fbfba..0000000000000 --- a/plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b9d09c1aa9d3f2119267f0b6549ae1810512c7b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-json-protocol-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/aws-json-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..bfc742d8687d1 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-json-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4600659276f84e114c1fabeb1478911c581a7739 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 deleted file mode 100644 index 9b19f570d56fb..0000000000000 --- a/plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e001792ec1a681f5bc6ee4157d572173416304ad \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-query-protocol-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/aws-query-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9508295147c96 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-query-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +61596c0cb577a4a6c438a5a7ee0391d2d825b3fe \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 deleted file mode 100644 index 50940d73f4f7b..0000000000000 --- a/plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b78a1182a9cf3cccf416cc5a441d08174b08682d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-xml-protocol-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/aws-xml-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..79a09fa635a20 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-xml-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +ad1620b4e221840e2215348a296cc762c23a59c3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/checksums-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/checksums-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4447b86f6e872 --- /dev/null +++ b/plugins/repository-s3/licenses/checksums-2.30.31.jar.sha1 @@ -0,0 +1 @@ +6d00287bc0ceb013dd5c74f1c4eb296ae61b34d4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/checksums-LICENSE.txt b/plugins/repository-s3/licenses/checksums-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/checksums-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-s3/licenses/checksums-NOTICE.txt b/plugins/repository-s3/licenses/checksums-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/checksums-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/repository-s3/licenses/checksums-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/checksums-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..078cab150c5ad --- /dev/null +++ b/plugins/repository-s3/licenses/checksums-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b5a5b0a39403acf41c21fd16cd11c7c8d887601b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/checksums-spi-LICENSE.txt b/plugins/repository-s3/licenses/checksums-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/checksums-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/checksums-spi-NOTICE.txt b/plugins/repository-s3/licenses/checksums-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/checksums-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 deleted file mode 100644 index 16f9db1fd6327..0000000000000 --- a/plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b9075dd0ed32da97f95229f55c01425353e8cba \ No newline at end of file diff --git a/plugins/repository-s3/licenses/endpoints-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/endpoints-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4dbc884c3da6f --- /dev/null +++ b/plugins/repository-s3/licenses/endpoints-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +0734f4b9c68f19201896dd47639035b4e0a7964d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-auth-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/http-auth-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..79893fb4fbf58 --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b7baeb158b0af0e400d89a32595c9127db2bbb6e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-auth-LICENSE.txt b/plugins/repository-s3/licenses/http-auth-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/http-auth-NOTICE.txt b/plugins/repository-s3/licenses/http-auth-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/repository-s3/licenses/http-auth-aws-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/http-auth-aws-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d190c6ca52e98 --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-aws-2.30.31.jar.sha1 @@ -0,0 +1 @@ +f2a7d383158746c82b0f41b021e0da23a2597b35 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-auth-aws-LICENSE.txt b/plugins/repository-s3/licenses/http-auth-aws-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-aws-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/http-auth-aws-NOTICE.txt b/plugins/repository-s3/licenses/http-auth-aws-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-aws-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/repository-s3/licenses/http-auth-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/http-auth-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..491ffe4dd0584 --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +513519f79635441d5205fc31d56c2e0d5826d27f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-auth-spi-LICENSE.txt b/plugins/repository-s3/licenses/http-auth-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-s3/licenses/http-auth-spi-NOTICE.txt b/plugins/repository-s3/licenses/http-auth-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 deleted file mode 100644 index 0662e15b1f3e6..0000000000000 --- a/plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -776bfc86fabc6e8c792ea4650a281d0bec5e9708 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-client-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/http-client-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d86fa139f535c --- /dev/null +++ b/plugins/repository-s3/licenses/http-client-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5fa894c333793b7481aa03aa87512b20e11b057d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/identity-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/identity-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9eeab9ad13dba --- /dev/null +++ b/plugins/repository-s3/licenses/identity-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +46da74ac074b176c25fba07c6541737422622c1d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/identity-spi-LICENSE.txt b/plugins/repository-s3/licenses/identity-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/identity-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/identity-spi-NOTICE.txt b/plugins/repository-s3/licenses/identity-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/identity-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 deleted file mode 100644 index 7011f8c3e6c78..0000000000000 --- a/plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5dd418ad48e3bfd8c3fa05ff29a955b91c1af666 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/json-utils-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/json-utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..5019f6d48fa0a --- /dev/null +++ b/plugins/repository-s3/licenses/json-utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7f0ef4b49299df2fd39f92113d94524729c61032 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 deleted file mode 100644 index bbd88bb9e1b0c..0000000000000 --- a/plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74a65d0f8decd0b3057fb500ca5409ff5778752a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/metrics-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/metrics-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..69ab3ec6f79ff --- /dev/null +++ b/plugins/repository-s3/licenses/metrics-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +57a979cbc99d0bf4113d96aaf4f453303a015966 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 deleted file mode 100644 index 4ae8b2ec5a23c..0000000000000 --- a/plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -29195a65eeea36cf1960d1939bca6586d5842dad \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-nio-client-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/netty-nio-client-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..f49d74cc59e37 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-nio-client-2.30.31.jar.sha1 @@ -0,0 +1 @@ +a7226fc3811c7a071e44a33273e081f212e581e3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 deleted file mode 100644 index 425ce9b92f9f2..0000000000000 --- a/plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -27a8f4aa488d1d3ef947865ee0190f16d10a3cc7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/profiles-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/profiles-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..6d4d2a1ac8d65 --- /dev/null +++ b/plugins/repository-s3/licenses/profiles-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d6d2d5788695972140dfe8b012ea7ccd97b82eef \ No newline at end of file diff --git a/plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 deleted file mode 100644 index 8de58699d8d82..0000000000000 --- a/plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bd85984ac6327a50d20e7957ecebf4fa3ad7766b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/protocol-core-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/protocol-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..caae2a4302976 --- /dev/null +++ b/plugins/repository-s3/licenses/protocol-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ 
+ee17b25525aee497b6d520c8e499f39de7204fbc \ No newline at end of file diff --git a/plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 deleted file mode 100644 index 266bc76ad6f77..0000000000000 --- a/plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04fd460ce1c633986ecef1b4218d3e7067a7087d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/regions-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/regions-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..8e9876686a144 --- /dev/null +++ b/plugins/repository-s3/licenses/regions-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7ce1df66496dcf9b124edb78ab9675e1e7d5c427 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/retries-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/retries-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..98b46e3439ac7 --- /dev/null +++ b/plugins/repository-s3/licenses/retries-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b490f67c9d3f000ae40928d9aa3c9debceac0966 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/retries-LICENSE.txt b/plugins/repository-s3/licenses/retries-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/retries-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/retries-NOTICE.txt b/plugins/repository-s3/licenses/retries-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/retries-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/repository-s3/licenses/retries-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/retries-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..854e3d7e4aebf --- /dev/null +++ b/plugins/repository-s3/licenses/retries-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4d9166189594243f88045fbf0c871a81e3914c0b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/retries-spi-LICENSE.txt b/plugins/repository-s3/licenses/retries-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/retries-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/retries-spi-NOTICE.txt b/plugins/repository-s3/licenses/retries-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/retries-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 deleted file mode 100644 index 7125793759db5..0000000000000 --- a/plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6a37f591abd11a3f848f091f1724825741daaeb2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/s3-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/s3-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..eb9aa9d13fe83 --- /dev/null +++ b/plugins/repository-s3/licenses/s3-2.30.31.jar.sha1 @@ -0,0 +1 @@ +958f263cf6b7e2ce6eb453627d57debd7fdd449b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 deleted file mode 100644 index 9eca40e6b9a9a..0000000000000 --- a/plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b9df9ca5e4918fab05db3b703b2873e83104c30 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sdk-core-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/sdk-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..ee3d7e3bff68d --- /dev/null +++ b/plugins/repository-s3/licenses/sdk-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b95c07d4796105c2e61c4c6ab60e3189886b2787 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 deleted file mode 100644 index cb73b19e14fcf..0000000000000 --- a/plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52b92753b944d3e1b8c6814bc9d6c93119ca6421 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/signer-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/signer-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..a03a173e4e2ad --- /dev/null +++ b/plugins/repository-s3/licenses/signer-2.30.31.jar.sha1 @@ -0,0 +1 @@ +e3d07951f347b85e5129cc31ed613a70f9259cac \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 deleted file mode 100644 index 1f40b6dcd8417..0000000000000 --- a/plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7415d850a4aea10935f84766065dd76a3d327a54 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sts-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/sts-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..3752d0003bc8d --- /dev/null +++ b/plugins/repository-s3/licenses/sts-2.30.31.jar.sha1 @@ -0,0 +1 @@ +fb85a774f8e7265ed4bc4255e6df8a80ee8cf4b9 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 deleted file mode 100644 index c9c3d4dc53505..0000000000000 --- a/plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ad6e7f7d52d8a5390b2daf2fd8ffcab97fe3102 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/third-party-jackson-core-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/third-party-jackson-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..a07a8eda62447 --- /dev/null +++ b/plugins/repository-s3/licenses/third-party-jackson-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +100d8022939bd59cd7d2461bd4fb0fd9fa028499 \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 deleted file mode 100644 index b91a3b3047570..0000000000000 --- a/plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a61f8b3c54ecf3dc785830d4f482f19ca52bc57 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/utils-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..184ff1cc5f9ce --- /dev/null +++ b/plugins/repository-s3/licenses/utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +3340adacb87ff28f90a039d57c81311b296db89e \ No newline at end of file From 82bbdfb71f127367ba1c6f36d7a1e68f4fa182cd Mon Sep 17 00:00:00 2001 From: Shailesh Singh Date: Wed, 5 Mar 2025 07:17:55 +0530 Subject: [PATCH 42/48] Fix Bug - Handle unsigned long in sorting order assertion of LongHashSet (#17207) * Fix Bug - handle unsigned long in assertion of LongHashSet Signed-off-by: Shailesh Singh * renamed TestDocValuesUnsignedLongHashSet.java to DocValuesUnsignedLongHashSetTests.java Signed-off-by: Shailesh Singh * Update server/src/main/java/org/opensearch/lucene/util/UnsignedLongHashSet.java Co-authored-by: Andriy Redko Signed-off-by: Shailesh Singh --------- Signed-off-by: Shailesh Singh Signed-off-by: Shailesh Singh Co-authored-by: Shailesh Singh Co-authored-by: Andriy Redko --- CHANGELOG.md | 1 + .../SortedUnsignedLongDocValuesSetQuery.java | 6 +- .../lucene/util/UnsignedLongHashSet.java | 139 +++++++++++++++++ .../DocValuesUnsignedLongHashSetTests.java | 141 ++++++++++++++++++ 4 files changed, 284 insertions(+), 3 deletions(-) create mode 100644 server/src/main/java/org/opensearch/lucene/util/UnsignedLongHashSet.java create mode 100644 server/src/test/java/org/opensearch/lucene/util/DocValuesUnsignedLongHashSetTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 091e832e01cd1..f40100aa2650e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/)) - Added ability to retrieve value from DocValues in a flat_object field([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802)) - Improve performance of NumericTermAggregation by avoiding unnecessary sorting([#17252](https://github.com/opensearch-project/OpenSearch/pull/17252)) +- Fix Bug - Handle unsigned long in sorting order assertion of LongHashSet ([#17207](https://github.com/opensearch-project/OpenSearch/pull/17207)) - Implemented computation of segment replication stats at shard level ([#17055](https://github.com/opensearch-project/OpenSearch/pull/17055)) - [Rule Based Auto-tagging] Add in-memory attribute value store ([#17342](https://github.com/opensearch-project/OpenSearch/pull/17342)) diff --git a/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java b/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java index 932f523ae071e..3d677aa6a8dfe 100644 --- a/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java +++ b/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java @@ -25,7 +25,7 @@ import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; -import
org.opensearch.lucene.util.LongHashSet; +import org.opensearch.lucene.util.UnsignedLongHashSet; import java.io.IOException; import java.math.BigInteger; @@ -40,12 +40,12 @@ public abstract class SortedUnsignedLongDocValuesSetQuery extends Query { private final String field; - private final LongHashSet numbers; + private final UnsignedLongHashSet numbers; SortedUnsignedLongDocValuesSetQuery(String field, BigInteger[] numbers) { this.field = Objects.requireNonNull(field); Arrays.sort(numbers); - this.numbers = new LongHashSet(Arrays.stream(numbers).mapToLong(n -> n.longValue()).toArray()); + this.numbers = new UnsignedLongHashSet(Arrays.stream(numbers).mapToLong(n -> n.longValue()).toArray()); } @Override diff --git a/server/src/main/java/org/opensearch/lucene/util/UnsignedLongHashSet.java b/server/src/main/java/org/opensearch/lucene/util/UnsignedLongHashSet.java new file mode 100644 index 0000000000000..ab9d4c4c91afc --- /dev/null +++ b/server/src/main/java/org/opensearch/lucene/util/UnsignedLongHashSet.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.lucene.util; + +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.apache.lucene.util.packed.PackedInts; +import org.opensearch.common.Numbers; + +import java.util.Arrays; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +/** Set of unsigned-longs, optimized for docvalues usage */ +public final class UnsignedLongHashSet implements Accountable { + private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(UnsignedLongHashSet.class); + + private static final long MISSING = Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG; + + final long[] table; + final int mask; + final boolean hasMissingValue; + final int size; + /** minimum value in the set, or Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG for an empty set */ + public final long minValue; + /** maximum value in the set, or Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG for an empty set */ + public final long maxValue; + + /** Construct a set. Values must be in sorted order. */ + public UnsignedLongHashSet(long[] values) { + int tableSize = Math.toIntExact(values.length * 3L / 2); + tableSize = 1 << PackedInts.bitsRequired(tableSize); // make it a power of 2 + assert tableSize >= values.length * 3L / 2; + table = new long[tableSize]; + Arrays.fill(table, MISSING); + mask = tableSize - 1; + boolean hasMissingValue = false; + int size = 0; + long previousValue = Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG; // for assert + for (long value : values) { + if (value == MISSING) { + size += hasMissingValue ? 0 : 1; + hasMissingValue = true; + } else if (add(value)) { + ++size; + } + assert Long.compareUnsigned(value, previousValue) >= 0 : " values must be provided in sorted order"; + previousValue = value; + } + this.hasMissingValue = hasMissingValue; + this.size = size; + this.minValue = values.length == 0 ? Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG : values[0]; + this.maxValue = values.length == 0 ? 
Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG : values[values.length - 1]; + } + + private boolean add(long l) { + assert l != MISSING; + final int slot = Long.hashCode(l) & mask; + for (int i = slot;; i = (i + 1) & mask) { + if (table[i] == MISSING) { + table[i] = l; + return true; + } else if (table[i] == l) { + // already added + return false; + } + } + } + + /** + * check for membership in the set. + * + *
<p>
You should use {@link #minValue} and {@link #maxValue} to guide/terminate iteration before + * calling this. + */ + public boolean contains(long l) { + if (l == MISSING) { + return hasMissingValue; + } + final int slot = Long.hashCode(l) & mask; + for (int i = slot;; i = (i + 1) & mask) { + if (table[i] == MISSING) { + return false; + } else if (table[i] == l) { + return true; + } + } + } + + /** returns a stream of all values contained in this set */ + public LongStream stream() { + LongStream stream = Arrays.stream(table).filter(v -> v != MISSING); + if (hasMissingValue) { + stream = LongStream.concat(LongStream.of(MISSING), stream); + } + return stream; + } + + @Override + public int hashCode() { + return Objects.hash(size, minValue, maxValue, mask, hasMissingValue, Arrays.hashCode(table)); + } + + @Override + public boolean equals(Object obj) { + if (obj != null && obj instanceof UnsignedLongHashSet) { + UnsignedLongHashSet that = (UnsignedLongHashSet) obj; + return size == that.size + && minValue == that.minValue + && maxValue == that.maxValue + && mask == that.mask + && hasMissingValue == that.hasMissingValue + && Arrays.equals(table, that.table); + } + return false; + } + + @Override + public String toString() { + return stream().mapToObj(Long::toUnsignedString).collect(Collectors.joining(", ", "[", "]")); + } + + /** number of elements in the set */ + public int size() { + return size; + } + + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES + RamUsageEstimator.sizeOfObject(table); + } +} diff --git a/server/src/test/java/org/opensearch/lucene/util/DocValuesUnsignedLongHashSetTests.java b/server/src/test/java/org/opensearch/lucene/util/DocValuesUnsignedLongHashSetTests.java new file mode 100644 index 0000000000000..a22ae031b97b5 --- /dev/null +++ b/server/src/test/java/org/opensearch/lucene/util/DocValuesUnsignedLongHashSetTests.java @@ -0,0 +1,141 @@ +package org.opensearch.lucene.util; + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+import org.apache.lucene.tests.util.LuceneTestCase;
+import org.opensearch.common.Numbers;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;
+
+public class DocValuesUnsignedLongHashSetTests extends LuceneTestCase {
+
+    private void assertEquals(Set<Long> set1, UnsignedLongHashSet unsignedLongHashSet) {
+        assertEquals(set1.size(), unsignedLongHashSet.size());
+
+        Set<Long> set2 = unsignedLongHashSet.stream().boxed().collect(Collectors.toSet());
+        LuceneTestCase.assertEquals(set1, set2);
+
+        if (set1.isEmpty() == false) {
+            Set<Long> set3 = new HashSet<>(set1);
+            long removed = set3.iterator().next();
+            while (true) {
+                long next = random().nextLong();
+                if (next != removed && set3.add(next)) {
+                    assertFalse(unsignedLongHashSet.contains(next));
+                    break;
+                }
+            }
+            assertNotEquals(set3, unsignedLongHashSet);
+        }
+
+        assertTrue(set1.stream().allMatch(unsignedLongHashSet::contains));
+    }
+
+    private void assertNotEquals(Set<Long> set1, UnsignedLongHashSet unsignedLongHashSet) {
+        Set<Long> set2 = unsignedLongHashSet.stream().boxed().collect(Collectors.toSet());
+
+        LuceneTestCase.assertNotEquals(set1, set2);
+
+        UnsignedLongHashSet set3 = new UnsignedLongHashSet(
+            set1.stream().sorted(Long::compareUnsigned).mapToLong(Long::longValue).toArray()
+        );
+
+        LuceneTestCase.assertNotEquals(set2, set3.stream().boxed().collect(Collectors.toSet()));
+
+        assertFalse(set1.stream().allMatch(unsignedLongHashSet::contains));
+    }
+
+    public void testEmpty() {
+        Set<Long> set1 = new HashSet<>();
+        UnsignedLongHashSet set2 = new UnsignedLongHashSet(new long[] {});
+        assertEquals(0, set2.size());
+        assertEquals(Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG, set2.minValue);
+        assertEquals(Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG, set2.maxValue);
+        assertEquals(set1, set2);
+    }
+
+    public void testOneValue() {
+        Set<Long> set1 = new HashSet<>(Arrays.asList(42L));
+        UnsignedLongHashSet set2 = new UnsignedLongHashSet(new long[] { 42L });
+        assertEquals(1, set2.size());
+        assertEquals(42L, set2.minValue);
+        assertEquals(42L, set2.maxValue);
+        assertEquals(set1, set2);
+
+        set1 = new HashSet<>(Arrays.asList(Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG));
+        set2 = new UnsignedLongHashSet(new long[] { Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG });
+        assertEquals(1, set2.size());
+        assertEquals(Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG, set2.minValue);
+        assertEquals(Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG, set2.maxValue);
+        assertEquals(set1, set2);
+    }
+
+    public void testTwoValues() {
+        Set<Long> set1 = new HashSet<>(Arrays.asList(42L, Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG));
+        UnsignedLongHashSet set2 = new UnsignedLongHashSet(new long[] { 42L, Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG });
+        assertEquals(2, set2.size());
+        assertEquals(42, set2.minValue);
+        assertEquals(Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG, set2.maxValue);
+        assertEquals(set1, set2);
+
+        set1 = new HashSet<>(Arrays.asList(Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG, 42L));
+        set2 = new UnsignedLongHashSet(new long[] { Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG, 42L });
+        assertEquals(2, set2.size());
+        assertEquals(Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG, set2.minValue);
+        assertEquals(42, set2.maxValue);
+        assertEquals(set1, set2);
+    }
+
+    public void testSameValue() {
+        UnsignedLongHashSet set2 = new UnsignedLongHashSet(new long[] { 42L, 42L });
+        assertEquals(1, set2.size());
+        assertEquals(42L, set2.minValue);
+        assertEquals(42L, set2.maxValue);
+    }
+
+    public void testSameMissingPlaceholder() { +
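// Long.MIN_VALUE is the MISSING placeholder (Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG), tracked via hasMissingValue rather than the hash table, so duplicates of it must still collapse to a single element +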
UnsignedLongHashSet set2 = new UnsignedLongHashSet(new long[] { Long.MIN_VALUE, Long.MIN_VALUE });
+        assertEquals(1, set2.size());
+        assertEquals(Long.MIN_VALUE, set2.minValue);
+        assertEquals(Long.MIN_VALUE, set2.maxValue);
+    }
+
+    public void testRandom() {
+        final int iters = atLeast(10);
+        for (int iter = 0; iter < iters; ++iter) {
+            long[] values = new long[random().nextInt(1 << random().nextInt(16))];
+            for (int i = 0; i < values.length; ++i) {
+                if (i == 0 || random().nextInt(10) < 9) {
+                    values[i] = random().nextLong();
+                } else {
+                    values[i] = values[random().nextInt(i)];
+                }
+            }
+            if (values.length > 0 && random().nextBoolean()) {
+                values[values.length / 2] = Long.MIN_VALUE;
+            }
+            Set<Long> set1 = LongStream.of(values).boxed().collect(Collectors.toSet());
+            Long[] longObjects = Arrays.stream(values).boxed().toArray(Long[]::new);
+            // Sort using compareUnsigned
+            Arrays.sort(longObjects, Long::compareUnsigned);
+
+            long[] arr = new long[values.length];
+            // Convert back to long[]
+            for (int i = 0; i < arr.length; i++) {
+                arr[i] = longObjects[i];
+            }
+            UnsignedLongHashSet set2 = new UnsignedLongHashSet(arr);
+            assertEquals(set1, set2);
+        }
+    }
+}

From 7330a88ebc5899c302422a65bc0169a83a880aa6 Mon Sep 17 00:00:00 2001
From: Daniel Widdis
Date: Tue, 4 Mar 2025 22:25:45 -0500
Subject: [PATCH 43/48] Add Windows 2025+Java 21 combination to build matrix
 (#17508)

Signed-off-by: Daniel Widdis
---
 .github/workflows/precommit.yml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml
index 7e20912fe1f60..a7b9496481a5d 100644
--- a/.github/workflows/precommit.yml
+++ b/.github/workflows/precommit.yml
@@ -9,6 +9,10 @@ jobs:
       matrix:
         java: [ 21, 23 ]
         os: [ubuntu-latest, windows-latest, macos-latest, macos-13, ubuntu-24.04-arm]
+        include:
+          - java: 21
+            os: 'windows-2025'
+            experimental: true
     steps:
       - uses: actions/checkout@v4
       - name: Set up JDK ${{ matrix.java }}
@@ -18,6 +22,7 @@
           distribution: temurin
           cache: gradle
       - name: Run Gradle (precommit)
+        continue-on-error: ${{ matrix.experimental }}
        shell: bash
        run: |
          ./gradlew javadoc precommit --parallel

From cc82be9cda7414f919cf64a52880135c2f4937f0 Mon Sep 17 00:00:00 2001
From: Peter Zhu
Date: Wed, 5 Mar 2025 16:27:14 -0500
Subject: [PATCH 44/48] Switch main/3.x to use JDK21 LTS version (#17515)

* Switch main/3.x to use JDK21 LTS version

Signed-off-by: Peter Zhu

* Update changelog 3.0

Signed-off-by: Peter Zhu

---------

Signed-off-by: Peter Zhu
---
 CHANGELOG-3.0.md | 1 +
 .../java/org/opensearch/gradle/test/DistroTestPlugin.java | 4 ++--
 gradle/libs.versions.toml | 2 +-
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md
index 7e82efd268007..5128d2f9ef3a0 100644
--- a/CHANGELOG-3.0.md
+++ b/CHANGELOG-3.0.md
@@ -29,6 +29,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366))
 - Bump Apache HttpCore5/HttpClient5 dependencies from 5.2.5/5.3.1 to 5.3.1/5.4.1 to support ExtendedSocketOption in HttpAsyncClient ([#16757](https://github.com/opensearch-project/OpenSearch/pull/16757))
 - Bumps `jetty` version from 9.4.55.v20240627 to 9.4.57.v20241219
+- Switch main/3.x to use JDK21 LTS version ([#17515](https://github.com/opensearch-project/OpenSearch/pull/17515))

 ### Changed
 - Changed locale provider from COMPAT to CLDR ([#14345](https://github.com/opensearch-project/OpenSearch/pull/14345))
diff --git
a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java
index 654af7da65662..888cd8d4bf5b5 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java
@@ -77,9 +77,9 @@ import java.util.stream.Stream;
 public class DistroTestPlugin implements Plugin<Project> {
-    private static final String SYSTEM_JDK_VERSION = "23.0.2+7";
+    private static final String SYSTEM_JDK_VERSION = "21.0.6+7";
     private static final String SYSTEM_JDK_VENDOR = "adoptium";
-    private static final String GRADLE_JDK_VERSION = "23.0.2+7";
+    private static final String GRADLE_JDK_VERSION = "21.0.6+7";
     private static final String GRADLE_JDK_VENDOR = "adoptium";

     // all distributions used by distro tests. this is temporary until tests are per distribution
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index 90518ca71ec53..8d8c49e531e77 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -3,7 +3,7 @@ opensearch = "3.0.0"
 lucene = "10.1.0"

 bundled_jdk_vendor = "adoptium"
-bundled_jdk = "23.0.2+7"
+bundled_jdk = "21.0.6+7"

 # optional dependencies
 spatial4j = "0.7"

From 02055cc1388dc0af73572f812cac803d2c697b56 Mon Sep 17 00:00:00 2001
From: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com>
Date: Thu, 6 Mar 2025 04:46:22 +0530
Subject: [PATCH 45/48] Add sample integ tests for latest systemd unit file
 (#17410)

* Add integration tests for systemd

Signed-off-by: Rajat Gupta

* Fix indentation

Signed-off-by: Rajat Gupta

* Remove unit file mount

Signed-off-by: Rajat Gupta

* Use centos image

Signed-off-by: Rajat Gupta

* Change method name

Signed-off-by: Rajat Gupta

* Add sample systemd integ tests to verify behavior

Signed-off-by: Rajat Gupta

* Update su with sudo probably need to have a privileged mode

Signed-off-by: Peter Zhu

* Additional tests

Signed-off-by: Rajat Gupta

* Wrap commands with su -c

Signed-off-by: Rajat Gupta

* Add sudo

Signed-off-by: Rajat Gupta

* Remove sudo for test process exit

Signed-off-by: Rajat Gupta

* Minor fixes

Signed-off-by: Rajat Gupta

* Fixed script string

Signed-off-by: Rajat Gupta

* Remove redundant code

Signed-off-by: Rajat Gupta

* Add terminate script

Signed-off-by: Rajat Gupta

* Modified terminate script

Signed-off-by: Rajat Gupta

* Add Changelog-3.0 entry

Signed-off-by: Rajat Gupta

* Fix for gradle precommit workflow

Signed-off-by: Rajat Gupta

* Fix testing conventions gradle precommit check

Signed-off-by: Rajat Gupta

* Fix imports

Signed-off-by: Rajat Gupta

* Only run as part of build integTest, remove gradle check

Signed-off-by: Rajat Gupta

* Remove bash

Signed-off-by: Rajat Gupta

* add sudo for systemctl command

Signed-off-by: Rajat Gupta

* Remove OpenSearchIntegTest class

Signed-off-by: Rajat Gupta

* Rename test file

Signed-off-by: Rajat Gupta

* Add test script

Signed-off-by: Rajat Gupta

* Extend LuceneTestCase class

Signed-off-by: Rajat Gupta

* Remove test bash script

Signed-off-by: Rajat Gupta

* Modify build.gradle

Signed-off-by: Rajat Gupta

---------

Signed-off-by: Rajat Gupta
Signed-off-by: Peter Zhu
Signed-off-by: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com>
Co-authored-by: Rajat Gupta
Co-authored-by: Peter Zhu
---
 CHANGELOG-3.0.md | 2 +
 qa/systemd-test/build.gradle | 5 +
 .../systemdinteg/SystemdIntegTests.java | 177 ++++++++++++++++++
 .../src/test/resources/scripts/terminate.sh | 12 ++
 4 files changed, 196 insertions(+)
 create mode 100644
qa/systemd-test/build.gradle create mode 100644 qa/systemd-test/src/test/java/org/opensearch/systemdinteg/SystemdIntegTests.java create mode 100755 qa/systemd-test/src/test/resources/scripts/terminate.sh diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 5128d2f9ef3a0..99b636822fb72 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -21,10 +21,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) - Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) - Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) +- Added integ tests for systemd configs ([#17410](https://github.com/opensearch-project/OpenSearch/pull/17410)) - Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder([#17409](https://github.com/opensearch-project/OpenSearch/pull/17409)) - [Star Tree] [Search] Resolving keyword & numeric bucket aggregation with metric aggregation using star-tree ([#17165](https://github.com/opensearch-project/OpenSearch/pull/17165)) - Added error handling support for the pull-based ingestion ([#17427](https://github.com/opensearch-project/OpenSearch/pull/17427)) + ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) - Bump Apache HttpCore5/HttpClient5 dependencies from 5.2.5/5.3.1 to 5.3.1/5.4.1 to support ExtendedSocketOption in HttpAsyncClient ([#16757](https://github.com/opensearch-project/OpenSearch/pull/16757)) diff --git a/qa/systemd-test/build.gradle b/qa/systemd-test/build.gradle new file mode 100644 index 0000000000000..7db5ddbf9ff12 --- /dev/null +++ b/qa/systemd-test/build.gradle @@ -0,0 +1,5 @@ +apply plugin: 'opensearch.standalone-rest-test' + +tasks.register("integTest", Test){ + include "**/*IntegTests.class" +} diff --git a/qa/systemd-test/src/test/java/org/opensearch/systemdinteg/SystemdIntegTests.java b/qa/systemd-test/src/test/java/org/opensearch/systemdinteg/SystemdIntegTests.java new file mode 100644 index 0000000000000..2beadd9445412 --- /dev/null +++ b/qa/systemd-test/src/test/java/org/opensearch/systemdinteg/SystemdIntegTests.java @@ -0,0 +1,177 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. 
+*/ + +package org.opensearch.systemdinteg; +import org.apache.lucene.tests.util.LuceneTestCase; + +import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpStatus; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.io.BufferedReader; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.PosixFilePermissions; +import java.util.Locale; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + + +public class SystemdIntegTests extends LuceneTestCase { + + private static String opensearchPid; + + @BeforeClass + public static void setup() throws IOException, InterruptedException { + opensearchPid = getOpenSearchPid(); + + if (opensearchPid.isEmpty()) { + throw new RuntimeException("Failed to find OpenSearch process ID"); + } + } + + private static String getOpenSearchPid() throws IOException, InterruptedException { + String command = "systemctl show --property=MainPID opensearch"; + String output = executeCommand(command, "Failed to get OpenSearch PID"); + return output.replace("MainPID=", "").trim(); + } + + private boolean checkPathExists(String path) throws IOException, InterruptedException { + String command = String.format(Locale.ROOT, "test -e %s && echo true || echo false", path); + return Boolean.parseBoolean(executeCommand(command, "Failed to check path existence")); + } + + private boolean checkPathReadable(String path) throws IOException, InterruptedException { + String command = String.format(Locale.ROOT, "sudo su opensearch -s /bin/sh -c 'test -r %s && echo true || echo false'", path); + return Boolean.parseBoolean(executeCommand(command, "Failed to check read permission")); + } + + private boolean checkPathWritable(String path) throws IOException, InterruptedException { + String command = String.format(Locale.ROOT, "sudo su opensearch -s /bin/sh -c 'test -w %s && echo true || echo false'", path); + return Boolean.parseBoolean(executeCommand(command, "Failed to check write permission")); + } + + private String getPathOwnership(String path) throws IOException, InterruptedException { + String command = String.format(Locale.ROOT, "stat -c '%%U:%%G' %s", path); + return executeCommand(command, "Failed to get path ownership"); + } + + private static String executeCommand(String command, String errorMessage) throws IOException, InterruptedException { + Process process = Runtime.getRuntime().exec(new String[]{"bash", "-c", command}); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { + StringBuilder output = new StringBuilder(); + String line; + while ((line = reader.readLine()) != null) { + output.append(line).append("\n"); + } + if (process.waitFor() != 0) { + throw new RuntimeException(errorMessage); + } + return output.toString().trim(); + } + } + + public void testReadOnlyPaths() throws IOException, InterruptedException { + String[] readOnlyPaths = { + "/etc/os-release", "/usr/lib/os-release", "/etc/system-release", + "/proc/self/mountinfo", "/proc/diskstats", + "/proc/self/cgroup", "/sys/fs/cgroup/cpu", 
"/sys/fs/cgroup/cpu/-", + "/sys/fs/cgroup/cpuacct", "/sys/fs/cgroup/cpuacct/-", + "/sys/fs/cgroup/memory", "/sys/fs/cgroup/memory/-" + }; + + for (String path : readOnlyPaths) { + if (checkPathExists(path)) { + assertTrue("Path should be readable: " + path, checkPathReadable(path)); + assertFalse("Path should not be writable: " + path, checkPathWritable(path)); + } + } + } + + public void testReadWritePaths() throws IOException, InterruptedException { + String[] readWritePaths = {"/var/log/opensearch", "/var/lib/opensearch"}; + for (String path : readWritePaths) { + assertTrue("Path should exist: " + path, checkPathExists(path)); + assertTrue("Path should be readable: " + path, checkPathReadable(path)); + assertTrue("Path should be writable: " + path, checkPathWritable(path)); + assertEquals("Path should be owned by opensearch:opensearch", "opensearch:opensearch", getPathOwnership(path)); + } + } + + public void testMaxProcesses() throws IOException, InterruptedException { + String limits = executeCommand("sudo su -c 'cat /proc/" + opensearchPid + "/limits'", "Failed to read process limits"); + assertTrue("Max processes limit should be 4096 or unlimited", + limits.contains("Max processes 4096 4096") || + limits.contains("Max processes unlimited unlimited")); + } + + public void testFileDescriptorLimit() throws IOException, InterruptedException { + String limits = executeCommand("sudo su -c 'cat /proc/" + opensearchPid + "/limits'", "Failed to read process limits"); + assertTrue("File descriptor limit should be at least 65535", + limits.contains("Max open files 65535 65535") || + limits.contains("Max open files unlimited unlimited")); + } + + public void testSystemCallFilter() throws IOException, InterruptedException { + // Check if Seccomp is enabled + String seccomp = executeCommand("sudo su -c 'grep Seccomp /proc/" + opensearchPid + "/status'", "Failed to read Seccomp status"); + assertFalse("Seccomp should be enabled", seccomp.contains("0")); + + // Test specific system calls that should be blocked + String rebootResult = executeCommand("sudo su opensearch -c 'kill -s SIGHUP 1' 2>&1 || echo 'Operation not permitted'", "Failed to test reboot system call"); + assertTrue("Reboot system call should be blocked", rebootResult.contains("Operation not permitted")); + + String swapResult = executeCommand("sudo su opensearch -c 'swapon -a' 2>&1 || echo 'Operation not permitted'", "Failed to test swap system call"); + assertTrue("Swap system call should be blocked", swapResult.contains("Operation not permitted")); + } + + public void testOpenSearchProcessCannotExit() throws IOException, InterruptedException { + + String scriptPath; + try { + scriptPath = SystemdIntegTests.class.getResource("/scripts/terminate.sh").toURI().getPath(); + } catch (URISyntaxException e) { + throw new RuntimeException("Failed to convert URL to URI", e); + } + + if (scriptPath == null) { + throw new IllegalStateException("Could not find terminate.sh script in resources"); + } + ProcessBuilder processBuilder = new ProcessBuilder(scriptPath, opensearchPid); + Process process = processBuilder.start(); + + // Wait a moment for any potential termination to take effect + Thread.sleep(2000); + + // Verify the OpenSearch service status + String serviceStatus = executeCommand( + "systemctl is-active opensearch", + "Failed to check OpenSearch service status" + ); + + assertEquals("OpenSearch service should be active", "active", serviceStatus.trim()); + } + +} diff --git a/qa/systemd-test/src/test/resources/scripts/terminate.sh 
b/qa/systemd-test/src/test/resources/scripts/terminate.sh
new file mode 100755
index 0000000000000..21ea62a475e70
--- /dev/null
+++ b/qa/systemd-test/src/test/resources/scripts/terminate.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+if [ $# -ne 1 ]; then
+    echo "Usage: $0 <pid>"
+    exit 1
+fi
+
+if kill -15 $1 2>/dev/null; then
+    echo "SIGTERM signal sent to process $1"
+else
+    echo "Failed to send SIGTERM to process $1"
+fi
\ No newline at end of file

From 3966ed93beb2ff13854d2a6e1ff51dfa32295830 Mon Sep 17 00:00:00 2001
From: Eric Pugh
Date: Wed, 5 Mar 2025 19:08:12 -0500
Subject: [PATCH 46/48] Fix small typo in DEVELOPER_GUIDE.md (#17512)

Signed-off-by: Eric Pugh
---
 DEVELOPER_GUIDE.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md
index b40f5d9b3f21a..e7ad1d8120ea6 100644
--- a/DEVELOPER_GUIDE.md
+++ b/DEVELOPER_GUIDE.md
@@ -127,7 +127,7 @@ All distributions built will be under `distributions/archives`.

 #### Generated Code

 OpenSearch uses code generators like [Protobuf](https://protobuf.dev/).
-OpenSearch build system already takes a dependency of generating code from protobuf, incase you run into compilation errors, run:
+OpenSearch build system already takes a dependency of generating code from protobuf, if you run into compilation errors, run:

 ```
 ./gradlew generateProto

From 342c6458f3ced88aa351e8807364f18b35c272ec Mon Sep 17 00:00:00 2001
From: Ashish Singh
Date: Thu, 6 Mar 2025 19:04:17 +0530
Subject: [PATCH 47/48] Fix red index on close for remote enabled clusters
 (#17521)

Signed-off-by: Ashish Singh
---
 .../org/opensearch/index/engine/ReadOnlyEngine.java | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java
index 1852f2fa92b74..5d42a7b830de0 100644
--- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java
+++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java
@@ -40,6 +40,7 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
 import org.opensearch.Version;
+import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.concurrent.GatedCloseable;
 import org.opensearch.common.lucene.Lucene;
 import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;
@@ -166,7 +167,7 @@ public ReadOnlyEngine(
     }

     protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(final SeqNoStats seqNoStats) {
-        if (requireCompleteHistory == false) {
+        if (requireCompleteHistory == false || isClosedRemoteIndex()) {
             return;
         }
         // Before 3.0 the global checkpoint is not known and up to date when the engine is created after
@@ -187,6 +188,14 @@ protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(final SeqNoStats seqNoStat
         }
     }

+    /**
+     * Returns true if this is a remote store index (included if migrating as well) which is closed.
+     */
+    private boolean isClosedRemoteIndex() {
+        return this.engineConfig.getIndexSettings().isAssignedOnRemoteNode()
+            && this.engineConfig.getIndexSettings().getIndexMetadata().getState() == IndexMetadata.State.CLOSE;
+    }
+
     protected boolean assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final long globalCheckpoint) {
         assert maxSeqNo == globalCheckpoint : "max seq. no.
[" + maxSeqNo + "] does not match [" + globalCheckpoint + "]";
         return true;

From 73882054afcdb74244c07c5be1f54a629ffd0bc2 Mon Sep 17 00:00:00 2001
From: Divyansh Pandey <98746046+pandeydivyansh1803@users.noreply.github.com>
Date: Thu, 6 Mar 2025 20:21:20 +0530
Subject: [PATCH 48/48] Update validator for index update request (#17529)

Signed-off-by: Divyansh Pandey
Co-authored-by: Divyansh Pandey
---
 ...AllocationDeciderRemoteStoreEnabledIT.java | 71 +++++++++++++++++++
 .../metadata/MetadataCreateIndexService.java | 3 +-
 .../MetadataUpdateSettingsService.java | 32 ++++++++-
 3 files changed, 103 insertions(+), 3 deletions(-)

diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java
index 401db7790de92..72c849cb395af 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java
@@ -8,6 +8,8 @@

 package org.opensearch.cluster.routing.allocation.decider;

+import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.routing.IndexShardRoutingTable;
 import org.opensearch.cluster.routing.ShardRouting;
@@ -99,6 +101,75 @@ public void testIndexPrimaryShardLimit() throws Exception {
         });
     }

+    public void testUpdatingIndexPrimaryShardLimit() throws Exception {
+        // Create first index with primary shard limit
+        Settings firstIndexSettings = Settings.builder()
+            .put(remoteStoreIndexSettings(0, 4)) // 4 shards, 0 replicas
+            .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1)
+            .build();
+
+        // Create first index
+        createIndex("test1", firstIndexSettings);
+
+        // Update the index settings to set INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING
+        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest("test1");
+        Settings updatedSettings = Settings.builder().put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1).build();
+        updateSettingsRequest.settings(updatedSettings);
+
+        AcknowledgedResponse response = client().admin().indices().updateSettings(updateSettingsRequest).actionGet();
+
+        assertTrue(response.isAcknowledged());
+
+        // Create second index
+        createIndex("test2", remoteStoreIndexSettings(0, 4));
+
+        assertBusy(() -> {
+            ClusterState state = client().admin().cluster().prepareState().get().getState();
+
+            // Check total number of shards (8 total: 4 from each index)
+            assertEquals("Total shards should be 8", 8, state.getRoutingTable().allShards().size());
+
+            // Count assigned and unassigned shards for test1
+            int test1AssignedShards = 0;
+            int test1UnassignedShards = 0;
+            Map<String, Integer> nodePrimaryCount = new HashMap<>();
+
+            // Check test1 shard distribution
+            for (IndexShardRoutingTable shardRouting : state.routingTable().index("test1")) {
+                for (ShardRouting shard : shardRouting) {
+                    if (shard.assignedToNode()) {
+                        test1AssignedShards++;
+                        // Count primaries per node for test1
+                        String nodeId = shard.currentNodeId();
+                        nodePrimaryCount.merge(nodeId, 1, Integer::sum);
+                    } else {
+                        test1UnassignedShards++;
+                    }
+                }
+            }
+
+            // Check test2 shard assignment +
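// (test2 was created without a per-node primary shard limit, so all of its shards should end up assigned) +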
int test2UnassignedShards = 0; + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test2")) { + for (ShardRouting shard : shardRouting) { + if (!shard.assignedToNode()) { + test2UnassignedShards++; + } + } + } + + // Assertions + assertEquals("test1 should have 3 assigned shards", 3, test1AssignedShards); + assertEquals("test1 should have 1 unassigned shard", 1, test1UnassignedShards); + assertEquals("test2 should have no unassigned shards", 0, test2UnassignedShards); + + // Verify no node has more than one primary shard of test1 + for (Integer count : nodePrimaryCount.values()) { + assertTrue("No node should have more than 1 primary shard of test1", count <= 1); + } + }); + } + public void testClusterPrimaryShardLimitss() throws Exception { // Update cluster setting to limit primary shards per node updateClusterSetting(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index a81fe01f0e7f4..2bdd31b23aee3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1847,7 +1847,8 @@ public static void validateRefreshIntervalSettings(Settings requestSettings, Clu } /** - * Validates {@code index.routing.allocation.total_primary_shards_per_node} is only set for remote store enabled cluster + * Validates the {@code index.routing.allocation.total_primary_shards_per_node} setting during index creation. + * Ensures this setting is only specified for remote store enabled clusters. */ // TODO : Update this check for SegRep to DocRep migration on need basis public static void validateIndexTotalPrimaryShardsPerNodeSetting(Settings indexSettings) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index eb10fd5d04288..469bec7220721 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlocks; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; @@ -78,12 +79,12 @@ import static org.opensearch.action.support.ContextPreservingActionListener.wrapPreservingContext; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; -import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateIndexTotalPrimaryShardsPerNodeSetting; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateOverlap; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogDurabilitySettings; import static 
org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex; import static org.opensearch.cluster.metadata.MetadataIndexTemplateService.findComponentTemplate; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; import static org.opensearch.common.settings.AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX; import static org.opensearch.index.IndexSettings.same; @@ -140,7 +141,7 @@ public void updateSettings( validateRefreshIntervalSettings(normalizedSettings, clusterService.getClusterSettings()); validateTranslogDurabilitySettings(normalizedSettings, clusterService.getClusterSettings(), clusterService.getSettings()); - validateIndexTotalPrimaryShardsPerNodeSetting(normalizedSettings); + validateIndexTotalPrimaryShardsPerNodeSetting(normalizedSettings, clusterService); final int defaultReplicaCount = clusterService.getClusterSettings().get(Metadata.DEFAULT_REPLICA_COUNT_SETTING); Settings.Builder settingsForClosedIndices = Settings.builder(); @@ -549,4 +550,31 @@ private void validateSearchReplicaCountSettings(Settings requestSettings, Index[ } } } + + /** + * Validates the 'index.routing.allocation.total_primary_shards_per_node' setting during index settings update. + * Ensures this setting can only be modified for existing indices in remote store enabled clusters. + */ + public static void validateIndexTotalPrimaryShardsPerNodeSetting(Settings indexSettings, ClusterService clusterService) { + // Get the setting value + int indexPrimaryShardsPerNode = INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get(indexSettings); + + // If default value (-1), no validation needed + if (indexPrimaryShardsPerNode == -1) { + return; + } + + // Check if remote store is enabled + boolean isRemoteStoreEnabled = clusterService.state() + .nodes() + .getNodes() + .values() + .stream() + .allMatch(DiscoveryNode::isRemoteStoreNode); + if (!isRemoteStoreEnabled) { + throw new IllegalArgumentException( + "Setting [" + INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey() + "] can only be used with remote store enabled clusters" + ); + } + } }
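
A minimal sketch (not part of the patches above) of what the new update-time validation looks like to a caller, reusing the request shape from the integration test in PATCH 48; the index name "my-index" and the test-framework client() helper are illustrative assumptions:

    // Assumes a cluster in which at least one node is not a remote store node.
    UpdateSettingsRequest request = new UpdateSettingsRequest("my-index");
    request.settings(Settings.builder().put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1).build());
    // validateIndexTotalPrimaryShardsPerNodeSetting now rejects the update with:
    // IllegalArgumentException: Setting [index.routing.allocation.total_primary_shards_per_node]
    // can only be used with remote store enabled clusters
    client().admin().indices().updateSettings(request).actionGet();
    // Requests that leave the setting at its default (-1), or clusters where every node
    // is a remote store node, continue to pass validation unchanged.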