diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index 60ae4d58f343e..90a4f74b5e9f4 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -10,6 +10,8 @@ import org.elasticsearch.gradle.util.Pair import org.elasticsearch.gradle.util.GradleUtils import org.elasticsearch.gradle.internal.test.TestUtil +import org.elasticsearch.gradle.internal.idea.EnablePreviewFeaturesTask +import org.elasticsearch.gradle.internal.idea.IdeaXmlUtil import org.jetbrains.gradle.ext.JUnit import java.nio.file.Files @@ -144,19 +146,10 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { } // modifies the idea module config to enable preview features on ':libs:native' module - tasks.register("enablePreviewFeatures") { + tasks.register("enablePreviewFeatures", EnablePreviewFeaturesTask) { group = 'ide' description = 'Enables preview features on native library module' dependsOn tasks.named("enableExternalConfiguration") - -// ext { - def enablePreview = { moduleFile, languageLevel -> - IdeaXmlUtil.modifyXml(moduleFile) { xml -> - xml.component.find { it.'@name' == 'NewModuleRootManager' }?.'@LANGUAGE_LEVEL' = languageLevel - } - } -// } - doLast { enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.main.iml', 'JDK_21_PREVIEW') enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.test.iml', 'JDK_21_PREVIEW') @@ -277,46 +270,6 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { } } -/** - * Parses a given XML file, applies a set of changes, and writes those changes back to the original file. - * - * @param path Path to existing XML file - * @param action Action to perform on parsed XML document - * @param preface optional front matter to add after the XML declaration - * but before the XML document, e.g. a doctype or comment - */ - -class IdeaXmlUtil { - static Node parseXml(Object xmlPath) { - File xmlFile = new File(xmlPath) - XmlParser xmlParser = new XmlParser(false, true, true) - xmlParser.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false) - Node xml = xmlParser.parse(xmlFile) - return xml - } - - static void modifyXml(Object xmlPath, Action action, String preface = null) { - File xmlFile = new File(xmlPath) - if (xmlFile.exists()) { - Node xml = parseXml(xmlPath) - action.execute(xml) - - xmlFile.withPrintWriter { writer -> - def printer = new XmlNodePrinter(writer) - printer.namespaceAware = true - printer.preserveWhitespace = true - writer.write("\n") - - if (preface != null) { - writer.write(preface) - } - printer.print(xml) - } - } - } -} - - Pair locateElasticsearchWorkspace(Gradle gradle) { if (gradle.parent == null) { // See if any of these included builds is the Elasticsearch gradle diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/EnablePreviewFeaturesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/EnablePreviewFeaturesTask.java new file mode 100644 index 0000000000000..f8c8b5127827f --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/EnablePreviewFeaturesTask.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.gradle.internal.idea; + +import groovy.util.Node; +import groovy.util.NodeList; + +import org.gradle.api.DefaultTask; +import org.xml.sax.SAXException; + +import java.io.IOException; + +import javax.xml.parsers.ParserConfigurationException; + +public class EnablePreviewFeaturesTask extends DefaultTask { + + public void enablePreview(String moduleFile, String languageLevel) throws IOException, ParserConfigurationException, SAXException { + IdeaXmlUtil.modifyXml(moduleFile, xml -> { + // Find the 'component' node + NodeList nodes = (NodeList) xml.depthFirst(); + Node componentNode = null; + for (Object node : nodes) { + Node currentNode = (Node) node; + if ("component".equals(currentNode.name()) && "NewModuleRootManager".equals(currentNode.attribute("name"))) { + componentNode = currentNode; + break; + } + } + + // Add the attribute to the 'component' node + if (componentNode != null) { + componentNode.attributes().put("LANGUAGE_LEVEL", languageLevel); + } + }); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/IdeaXmlUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/IdeaXmlUtil.java new file mode 100644 index 0000000000000..b7cc2862a0af1 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/IdeaXmlUtil.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.gradle.internal.idea; + +import groovy.util.Node; +import groovy.util.XmlParser; +import groovy.xml.XmlNodePrinter; + +import org.gradle.api.Action; +import org.xml.sax.SAXException; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; + +import javax.xml.parsers.ParserConfigurationException; + +public class IdeaXmlUtil { + + static Node parseXml(String xmlPath) throws IOException, SAXException, ParserConfigurationException { + File xmlFile = new File(xmlPath); + XmlParser xmlParser = new XmlParser(false, true, true); + xmlParser.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false); + Node xml = xmlParser.parse(xmlFile); + return xml; + } + + /** + * Parses a given XML file, applies a set of changes, and writes those changes back to the original file. + * + * @param path Path to existing XML file + * @param action Action to perform on parsed XML document + * but before the XML document, e.g. a doctype or comment + */ + static void modifyXml(String xmlPath, Action action) throws IOException, ParserConfigurationException, SAXException { + modifyXml(xmlPath, action, null); + } + + /** + * Parses a given XML file, applies a set of changes, and writes those changes back to the original file. 
+ * + * @param path Path to existing XML file + * @param action Action to perform on parsed XML document + * @param preface optional front matter to add after the XML declaration + * but before the XML document, e.g. a doctype or comment + */ + static void modifyXml(String xmlPath, Action action, String preface) throws IOException, ParserConfigurationException, + SAXException { + File xmlFile = new File(xmlPath); + if (xmlFile.exists()) { + Node xml = parseXml(xmlPath); + action.execute(xml); + + try (PrintWriter writer = new PrintWriter(xmlFile)) { + var printer = new XmlNodePrinter(writer); + printer.setNamespaceAware(true); + printer.setPreserveWhitespace(true); + writer.write("\n"); + if (preface != null) { + writer.write(preface); + } + printer.print(xml); + } + } + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java index 265bd52eabe83..916823fd91b61 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java @@ -89,7 +89,6 @@ public static void stopHttpServers() throws IOException { } public void testBuilderUsesDefaultSSLContext() throws Exception { - assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); final SSLContext defaultSSLContext = SSLContext.getDefault(); try { try (RestClient client = buildRestClient()) { @@ -97,10 +96,15 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { client.performRequest(new Request("GET", "/")); fail("connection should have been rejected due to SSL handshake"); } catch (Exception e) { - assertThat(e, instanceOf(SSLHandshakeException.class)); + if (inFipsJvm()) { + // Bouncy Castle throw a different exception + assertThat(e, instanceOf(IOException.class)); + assertThat(e.getCause(), instanceOf(javax.net.ssl.SSLException.class)); + } else { + assertThat(e, instanceOf(SSLHandshakeException.class)); + } } } - SSLContext.setDefault(getSslContext()); try (RestClient client = buildRestClient()) { Response response = client.performRequest(new Request("GET", "/")); @@ -112,7 +116,6 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { } public void testBuilderSetsThreadName() throws Exception { - assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); final SSLContext defaultSSLContext = SSLContext.getDefault(); try { SSLContext.setDefault(getSslContext()); diff --git a/docs/changelog/118324.yaml b/docs/changelog/118324.yaml new file mode 100644 index 0000000000000..729ff56f6a253 --- /dev/null +++ b/docs/changelog/118324.yaml @@ -0,0 +1,6 @@ +pr: 118324 +summary: Allow the data type of `null` in filters +area: ES|QL +type: bug +issues: + - 116351 diff --git a/docs/changelog/118938.yaml b/docs/changelog/118938.yaml new file mode 100644 index 0000000000000..395da7912fd4b --- /dev/null +++ b/docs/changelog/118938.yaml @@ -0,0 +1,5 @@ +pr: 118938 +summary: Hash functions +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/119250.yaml b/docs/changelog/119250.yaml new file mode 100644 index 0000000000000..9db36957d8050 --- /dev/null +++ b/docs/changelog/119250.yaml @@ -0,0 +1,5 @@ +pr: 119250 +summary: Add rest endpoint for `create_from_source_index` +area: Data streams +type: enhancement +issues: [] diff --git a/docs/reference/esql/functions/description/md5.asciidoc 
b/docs/reference/esql/functions/description/md5.asciidoc new file mode 100644 index 0000000000000..2ad847c0ce0e3 --- /dev/null +++ b/docs/reference/esql/functions/description/md5.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Computes the MD5 hash of the input. diff --git a/docs/reference/esql/functions/description/sha1.asciidoc b/docs/reference/esql/functions/description/sha1.asciidoc new file mode 100644 index 0000000000000..5bc29f86cc591 --- /dev/null +++ b/docs/reference/esql/functions/description/sha1.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Computes the SHA1 hash of the input. diff --git a/docs/reference/esql/functions/description/sha256.asciidoc b/docs/reference/esql/functions/description/sha256.asciidoc new file mode 100644 index 0000000000000..b2a7ef01e1069 --- /dev/null +++ b/docs/reference/esql/functions/description/sha256.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Computes the SHA256 hash of the input. diff --git a/docs/reference/esql/functions/examples/hash.asciidoc b/docs/reference/esql/functions/examples/hash.asciidoc new file mode 100644 index 0000000000000..492e466eb395e --- /dev/null +++ b/docs/reference/esql/functions/examples/hash.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/hash.csv-spec[tag=hash] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/hash.csv-spec[tag=hash-result] +|=== + diff --git a/docs/reference/esql/functions/examples/md5.asciidoc b/docs/reference/esql/functions/examples/md5.asciidoc new file mode 100644 index 0000000000000..0b43bc5b791c9 --- /dev/null +++ b/docs/reference/esql/functions/examples/md5.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/hash.csv-spec[tag=md5] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/hash.csv-spec[tag=md5-result] +|=== + diff --git a/docs/reference/esql/functions/examples/sha1.asciidoc b/docs/reference/esql/functions/examples/sha1.asciidoc new file mode 100644 index 0000000000000..77786431a738a --- /dev/null +++ b/docs/reference/esql/functions/examples/sha1.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/hash.csv-spec[tag=sha1] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/hash.csv-spec[tag=sha1-result] +|=== + diff --git a/docs/reference/esql/functions/examples/sha256.asciidoc b/docs/reference/esql/functions/examples/sha256.asciidoc new file mode 100644 index 0000000000000..801c36d8effc8 --- /dev/null +++ b/docs/reference/esql/functions/examples/sha256.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/hash.csv-spec[tag=sha256] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/hash.csv-spec[tag=sha256-result] +|=== + diff --git a/docs/reference/esql/functions/kibana/definition/hash.json b/docs/reference/esql/functions/kibana/definition/hash.json index 17a60cf45acfe..dbf4a2542afc5 100644 --- a/docs/reference/esql/functions/kibana/definition/hash.json +++ b/docs/reference/esql/functions/kibana/definition/hash.json @@ -77,6 +77,9 @@ "returnType" : "keyword" } ], + "examples" : [ + "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL md5 = hash(\"md5\", message), sha256 = hash(\"sha256\", message) \n| KEEP message, md5, sha256;" + ], "preview" : false, "snapshot_only" : false } diff --git a/docs/reference/esql/functions/kibana/definition/md5.json b/docs/reference/esql/functions/kibana/definition/md5.json new file mode 100644 index 0000000000000..4d3a88e123ff4 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/md5.json @@ -0,0 +1,37 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "md5", + "description" : "Computes the MD5 hash of the input.", + "signatures" : [ + { + "params" : [ + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "examples" : [ + "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL md5 = md5(message)\n| KEEP message, md5;" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/sha1.json b/docs/reference/esql/functions/kibana/definition/sha1.json new file mode 100644 index 0000000000000..a6abb31368bb3 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/sha1.json @@ -0,0 +1,37 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "sha1", + "description" : "Computes the SHA1 hash of the input.", + "signatures" : [ + { + "params" : [ + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "examples" : [ + "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL sha1 = sha1(message)\n| KEEP message, sha1;" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/sha256.json b/docs/reference/esql/functions/kibana/definition/sha256.json new file mode 100644 index 0000000000000..700425d485b61 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/sha256.json @@ -0,0 +1,37 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "sha256", + "description" : "Computes the SHA256 hash of the input.", + "signatures" : [ + { + "params" : [ + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "examples" : [ + "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL sha256 = sha256(message)\n| KEEP message, sha256;" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/docs/hash.md b/docs/reference/esql/functions/kibana/docs/hash.md index 9826e80ec5bec..4e937778ba67a 100644 --- a/docs/reference/esql/functions/kibana/docs/hash.md +++ b/docs/reference/esql/functions/kibana/docs/hash.md @@ -5,3 +5,9 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ ### HASH Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512. +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL md5 = hash("md5", message), sha256 = hash("sha256", message) +| KEEP message, md5, sha256; +``` diff --git a/docs/reference/esql/functions/kibana/docs/md5.md b/docs/reference/esql/functions/kibana/docs/md5.md new file mode 100644 index 0000000000000..aacb8a3960165 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/md5.md @@ -0,0 +1,13 @@ + + +### MD5 +Computes the MD5 hash of the input. + +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL md5 = md5(message) +| KEEP message, md5; +``` diff --git a/docs/reference/esql/functions/kibana/docs/sha1.md b/docs/reference/esql/functions/kibana/docs/sha1.md new file mode 100644 index 0000000000000..a940aa133f06e --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/sha1.md @@ -0,0 +1,13 @@ + + +### SHA1 +Computes the SHA1 hash of the input. + +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL sha1 = sha1(message) +| KEEP message, sha1; +``` diff --git a/docs/reference/esql/functions/kibana/docs/sha256.md b/docs/reference/esql/functions/kibana/docs/sha256.md new file mode 100644 index 0000000000000..fbe576c7c20d6 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/sha256.md @@ -0,0 +1,13 @@ + + +### SHA256 +Computes the SHA256 hash of the input. + +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL sha256 = sha256(message) +| KEEP message, sha256; +``` diff --git a/docs/reference/esql/functions/layout/hash.asciidoc b/docs/reference/esql/functions/layout/hash.asciidoc index 27c55ada6319b..daf7fbf1170b2 100644 --- a/docs/reference/esql/functions/layout/hash.asciidoc +++ b/docs/reference/esql/functions/layout/hash.asciidoc @@ -12,3 +12,4 @@ image::esql/functions/signature/hash.svg[Embedded,opts=inline] include::../parameters/hash.asciidoc[] include::../description/hash.asciidoc[] include::../types/hash.asciidoc[] +include::../examples/hash.asciidoc[] diff --git a/docs/reference/esql/functions/layout/md5.asciidoc b/docs/reference/esql/functions/layout/md5.asciidoc new file mode 100644 index 0000000000000..82d3031d6bdfd --- /dev/null +++ b/docs/reference/esql/functions/layout/md5.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it. + +[discrete] +[[esql-md5]] +=== `MD5` + +*Syntax* + +[.text-center] +image::esql/functions/signature/md5.svg[Embedded,opts=inline] + +include::../parameters/md5.asciidoc[] +include::../description/md5.asciidoc[] +include::../types/md5.asciidoc[] +include::../examples/md5.asciidoc[] diff --git a/docs/reference/esql/functions/layout/sha1.asciidoc b/docs/reference/esql/functions/layout/sha1.asciidoc new file mode 100644 index 0000000000000..23e1e0e9ac2ab --- /dev/null +++ b/docs/reference/esql/functions/layout/sha1.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-sha1]] +=== `SHA1` + +*Syntax* + +[.text-center] +image::esql/functions/signature/sha1.svg[Embedded,opts=inline] + +include::../parameters/sha1.asciidoc[] +include::../description/sha1.asciidoc[] +include::../types/sha1.asciidoc[] +include::../examples/sha1.asciidoc[] diff --git a/docs/reference/esql/functions/layout/sha256.asciidoc b/docs/reference/esql/functions/layout/sha256.asciidoc new file mode 100644 index 0000000000000..d36a1345271f5 --- /dev/null +++ b/docs/reference/esql/functions/layout/sha256.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-sha256]] +=== `SHA256` + +*Syntax* + +[.text-center] +image::esql/functions/signature/sha256.svg[Embedded,opts=inline] + +include::../parameters/sha256.asciidoc[] +include::../description/sha256.asciidoc[] +include::../types/sha256.asciidoc[] +include::../examples/sha256.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/md5.asciidoc b/docs/reference/esql/functions/parameters/md5.asciidoc new file mode 100644 index 0000000000000..99eba4dc2cb3d --- /dev/null +++ b/docs/reference/esql/functions/parameters/md5.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`input`:: +Input to hash. diff --git a/docs/reference/esql/functions/parameters/sha1.asciidoc b/docs/reference/esql/functions/parameters/sha1.asciidoc new file mode 100644 index 0000000000000..99eba4dc2cb3d --- /dev/null +++ b/docs/reference/esql/functions/parameters/sha1.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`input`:: +Input to hash. diff --git a/docs/reference/esql/functions/parameters/sha256.asciidoc b/docs/reference/esql/functions/parameters/sha256.asciidoc new file mode 100644 index 0000000000000..99eba4dc2cb3d --- /dev/null +++ b/docs/reference/esql/functions/parameters/sha256.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`input`:: +Input to hash. 
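A quick illustrative query for the three convenience functions added by this PR (`MD5`, `SHA1`, `SHA256`): this is a sketch that combines the generated examples above and assumes the same `sample_data` index and `message` field used in those examples.

```esql
FROM sample_data
| WHERE message != "Connection error"
| EVAL md5 = MD5(message), sha1 = SHA1(message), sha256 = SHA256(message)
| KEEP message, md5, sha1, sha256
```

Each dedicated function should produce the same value as the existing variadic form, e.g. `MD5(message)` versus `HASH("md5", message)`; the new functions simply fix the algorithm and take a single `input` of type `keyword` or `text`, returning a `keyword`.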
diff --git a/docs/reference/esql/functions/signature/md5.svg b/docs/reference/esql/functions/signature/md5.svg new file mode 100644 index 0000000000000..419af764a212e --- /dev/null +++ b/docs/reference/esql/functions/signature/md5.svg @@ -0,0 +1 @@ +MD5(input) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/sha1.svg b/docs/reference/esql/functions/signature/sha1.svg new file mode 100644 index 0000000000000..bab03a7eb88c8 --- /dev/null +++ b/docs/reference/esql/functions/signature/sha1.svg @@ -0,0 +1 @@ +SHA1(input) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/sha256.svg b/docs/reference/esql/functions/signature/sha256.svg new file mode 100644 index 0000000000000..b77126bbefbd8 --- /dev/null +++ b/docs/reference/esql/functions/signature/sha256.svg @@ -0,0 +1 @@ +SHA256(input) \ No newline at end of file diff --git a/docs/reference/esql/functions/string-functions.asciidoc b/docs/reference/esql/functions/string-functions.asciidoc index da9580a55151a..dd10e4c77581e 100644 --- a/docs/reference/esql/functions/string-functions.asciidoc +++ b/docs/reference/esql/functions/string-functions.asciidoc @@ -18,11 +18,14 @@ * <> * <> * <> +* <> * <> * <> * <> * <> * <> +* <> +* <> * <> * <> * <> @@ -43,11 +46,14 @@ include::layout/left.asciidoc[] include::layout/length.asciidoc[] include::layout/locate.asciidoc[] include::layout/ltrim.asciidoc[] +include::layout/md5.asciidoc[] include::layout/repeat.asciidoc[] include::layout/replace.asciidoc[] include::layout/reverse.asciidoc[] include::layout/right.asciidoc[] include::layout/rtrim.asciidoc[] +include::layout/sha1.asciidoc[] +include::layout/sha256.asciidoc[] include::layout/space.asciidoc[] include::layout/split.asciidoc[] include::layout/starts_with.asciidoc[] diff --git a/docs/reference/esql/functions/types/md5.asciidoc b/docs/reference/esql/functions/types/md5.asciidoc new file mode 100644 index 0000000000000..049a553397bbd --- /dev/null +++ b/docs/reference/esql/functions/types/md5.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +input | result +keyword | keyword +text | keyword +|=== diff --git a/docs/reference/esql/functions/types/sha1.asciidoc b/docs/reference/esql/functions/types/sha1.asciidoc new file mode 100644 index 0000000000000..049a553397bbd --- /dev/null +++ b/docs/reference/esql/functions/types/sha1.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +input | result +keyword | keyword +text | keyword +|=== diff --git a/docs/reference/esql/functions/types/sha256.asciidoc b/docs/reference/esql/functions/types/sha256.asciidoc new file mode 100644 index 0000000000000..049a553397bbd --- /dev/null +++ b/docs/reference/esql/functions/types/sha256.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +input | result +keyword | keyword +text | keyword +|=== diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index e3c292cc534bf..30a1039f93db0 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -557,4 +557,3 @@ The API returns the following results: // TESTRESPONSE[s/"job_version" : "8.4.0"/"job_version" : $body.job_version/] // TESTRESPONSE[s/1656087283340/$body.$_path/] // TESTRESPONSE[s/"superuser"/"_es_test_root"/] -// TESTRESPONSE[s/"ignore_throttled" : true/"ignore_throttled" : true,"failure_store":"exclude"/] diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java index 66ef1f69c8c3a..8b03aeb178587 100644 --- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java +++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java @@ -37,6 +37,8 @@ public interface EntitlementChecker { void check$java_lang_Runtime$halt(Class callerClass, Runtime runtime, int status); + void check$java_lang_System$$exit(Class callerClass, int status); + //////////////////// // // ClassLoader ctor diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java index 3c12b2f6bc62b..9869af4d85251 100644 --- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java @@ -83,6 +83,7 @@ static CheckAction alwaysDenied(Runnable action) { private static final Map checkActions = Map.ofEntries( entry("runtime_exit", deniedToPlugins(RestEntitlementsCheckAction::runtimeExit)), entry("runtime_halt", deniedToPlugins(RestEntitlementsCheckAction::runtimeHalt)), + entry("system_exit", deniedToPlugins(RestEntitlementsCheckAction::systemExit)), entry("create_classloader", forPlugins(RestEntitlementsCheckAction::createClassLoader)), entry("processBuilder_start", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_start)), entry("processBuilder_startPipeline", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_startPipeline)), @@ -153,6 +154,11 @@ private static void runtimeHalt() { Runtime.getRuntime().halt(123); } + @SuppressForbidden(reason = "Specifically testing System.exit") + private static void systemExit() { + System.exit(123); + } + private static void createClassLoader() { try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) { logger.info("Created URLClassLoader [{}]", classLoader.getName()); diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java index c0a047dc1a458..686fb73e10bc2 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java +++ 
b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java @@ -51,6 +51,11 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { policyManager.checkExitVM(callerClass); } + @Override + public void check$java_lang_System$$exit(Class callerClass, int status) { + policyManager.checkExitVM(callerClass); + } + @Override public void check$java_lang_ClassLoader$(Class callerClass) { policyManager.checkCreateClassLoader(callerClass); diff --git a/modules/data-streams/build.gradle b/modules/data-streams/build.gradle index 97a5fabd79f4c..60bc8d1dc6a92 100644 --- a/modules/data-streams/build.gradle +++ b/modules/data-streams/build.gradle @@ -20,6 +20,7 @@ restResources { dependencies { testImplementation project(path: ':test:test-clusters') + testImplementation project(":modules:mapper-extras") internalClusterTestImplementation project(":modules:mapper-extras") } @@ -70,4 +71,16 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("data_stream/200_rollover_failure_store/Lazily roll over a data stream's failure store after a shard failure", "Configuring the failure store via data stream templates is not supported anymore.") task.skipTest("data_stream/200_rollover_failure_store/Don't roll over a data stream's failure store when conditions aren't met", "Configuring the failure store via data stream templates is not supported anymore.") task.skipTest("data_stream/200_rollover_failure_store/Roll over a data stream's failure store with conditions", "Configuring the failure store via data stream templates is not supported anymore.") + + task.skipTest("data_stream/200_rollover_failure_store/Rolling over a failure store on a data stream without the failure store enabled should work", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/200_rollover_failure_store/Rolling over an uninitialized failure store should initialize it", "Rolling over a data stream using target_failure_store is no longer supported.") + + task.skipTest("data_stream/210_rollover_failure_store/A failure store marked for lazy rollover should only be rolled over when there is a failure", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Don't roll over a data stream's failure store when conditions aren't met", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Rolling over a failure store on a data stream without the failure store enabled should work", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Rolling over an uninitialized failure store should initialize it", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Roll over a data stream's failure store with conditions", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Lazily roll over a data stream's failure store after an ingest failure", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Lazily roll over a data stream's failure store after a shard failure", "Rolling over a data stream using target_failure_store is no 
longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Roll over a data stream's failure store without conditions", "Rolling over a data stream using target_failure_store is no longer supported.") }) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 32d080ccc46b1..ac828630b0463 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -31,11 +31,13 @@ import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.DeleteDataStreamAction; import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamAlias; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.index.Index; @@ -136,10 +138,7 @@ public void setup() throws Exception { assertTrue(response.isAcknowledged()); // Initialize the failure store. - RolloverRequest rolloverRequest = new RolloverRequest("with-fs", null); - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() - ); + RolloverRequest rolloverRequest = new RolloverRequest("with-fs::failures", null); response = client.execute(RolloverAction.INSTANCE, rolloverRequest).get(); assertTrue(response.isAcknowledged()); @@ -345,7 +344,7 @@ public void testFailureStoreSnapshotAndRestore() throws Exception { .cluster() .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) - .setIndices(dataStreamName) + .setIndices(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.ALL_APPLICABLE)) .setIncludeGlobalState(false) .get(); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java index e9eaf7b5faddb..bee3989d20ff0 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -20,11 +20,12 @@ import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexMetadata; +import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.core.Strings; @@ -194,9 +195,9 @@ public void testRejectionFromFailureStore() throws IOException { createDataStream(); // Initialize failure store. - var rolloverRequest = new RolloverRequest(dataStream, null); - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() + var rolloverRequest = new RolloverRequest( + IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES), + null ); var rolloverResponse = client().execute(RolloverAction.INSTANCE, rolloverRequest).actionGet(); var failureStoreIndex = rolloverResponse.getNewIndex(); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java index 482867d072fc2..54e21d5155ed1 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java @@ -60,7 +60,7 @@ public void setup() throws IOException { assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME))); // Initialize the failure store. - assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "/_rollover?target_failure_store"))); + assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "::failures/_rollover"))); ensureGreen(DATA_STREAM_NAME); final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java deleted file mode 100644 index 85b914be30b2c..0000000000000 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.datastreams; - -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.junit.Before; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -/** - * This should be a yaml test, but in order to write one we would need to expose the new parameter in the rest-api-spec. - * We do not want to do that until the feature flag is removed. For this reason, we temporarily, test the affected APIs here. - * Please convert this to a yaml test when the feature flag is removed. 
- */ -public class FailureStoreQueryParamIT extends DisabledSecurityDataStreamTestCase { - - private static final String DATA_STREAM_NAME = "failure-data-stream"; - private String backingIndex; - private String failureStoreIndex; - - @SuppressWarnings("unchecked") - @Before - public void setup() throws IOException { - Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/ds-template"); - putComposableIndexTemplateRequest.setJsonEntity(""" - { - "index_patterns": ["failure-data-stream"], - "template": { - "settings": { - "number_of_replicas": 0 - }, - "data_stream_options": { - "failure_store": { - "enabled": true - } - } - }, - "data_stream": { - } - } - """); - assertOK(client().performRequest(putComposableIndexTemplateRequest)); - - assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME))); - // Initialize the failure store. - assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "/_rollover?target_failure_store"))); - ensureGreen(DATA_STREAM_NAME); - - final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); - List dataStreams = (List) entityAsMap(dataStreamResponse).get("data_streams"); - assertThat(dataStreams.size(), is(1)); - Map dataStream = (Map) dataStreams.get(0); - assertThat(dataStream.get("name"), equalTo(DATA_STREAM_NAME)); - List backingIndices = getIndices(dataStream); - assertThat(backingIndices.size(), is(1)); - List failureStore = getFailureStore(dataStream); - assertThat(failureStore.size(), is(1)); - backingIndex = backingIndices.get(0); - failureStoreIndex = failureStore.get(0); - } - - public void testGetIndexApi() throws IOException { - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME)); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=exclude")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=only")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - @SuppressWarnings("unchecked") - public void testGetIndexStatsApi() throws IOException { - { - final Response statsResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_stats")); - Map indices = (Map) entityAsMap(statsResponse).get("indices"); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response statsResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=include") - ); - Map indices = (Map) entityAsMap(statsResponse).get("indices"); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response statsResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=only") - ); - Map indices = (Map) 
entityAsMap(statsResponse).get("indices"); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - public void testGetIndexSettingsApi() throws IOException { - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_settings")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=include") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=only") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - public void testGetIndexMappingApi() throws IOException { - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=include") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=only") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - @SuppressWarnings("unchecked") - public void testPutIndexMappingApi() throws IOException { - { - final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping"); - mappingRequest.setJsonEntity(""" - { - "properties": { - "email": { - "type": "keyword" - } - } - } - """); - assertAcknowledged(client().performRequest(mappingRequest)); - } - { - final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=include"); - mappingRequest.setJsonEntity(""" - { - "properties": { - "email": { - "type": "keyword" - } - } - } - """); - ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest(mappingRequest)); - Map response = entityAsMap(responseException.getResponse()); - assertThat(((Map) response.get("error")).get("reason"), is("failure index not supported")); - } - } - - @SuppressWarnings("unchecked") - private List getFailureStore(Map response) { - var failureStore = (Map) response.get("failure_store"); - return getIndices(failureStore); - - } - - @SuppressWarnings("unchecked") - private List getIndices(Map response) { - List> indices = (List>) response.get("indices"); - return indices.stream().map(index -> index.get("index_name")).toList(); - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java index 1d3b1b676282a..cc5e00d8283ad 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.datastreams.DataStreamsStatsAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -102,10 +103,11 @@ protected ClusterBlockException checkRequestBlock( @Override protected String[] resolveConcreteIndexNames(ClusterState clusterState, DataStreamsStatsAction.Request request) { - return DataStreamsActionUtil.resolveConcreteIndexNames( + return DataStreamsActionUtil.resolveConcreteIndexNamesWithSelector( indexNameExpressionResolver, clusterState, request.indices(), + IndexComponentSelector.ALL_APPLICABLE, request.indicesOptions() ).toArray(String[]::new); } @@ -163,13 +165,17 @@ protected DataStreamsStatsAction.DataStreamShardStats readShardResult(StreamInpu request.indicesOptions(), request.indices() ); - for (String abstractionName : abstractionNames) { - IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName); + for (String abstraction : abstractionNames) { + IndexAbstraction indexAbstraction = indicesLookup.get(abstraction); assert indexAbstraction != null; if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { DataStream dataStream = (DataStream) indexAbstraction; AggregatedStats stats = aggregatedDataStreamsStats.computeIfAbsent(dataStream.getName(), s -> new AggregatedStats()); - dataStream.getIndices().stream().map(Index::getName).forEach(index -> { + dataStream.getBackingIndices().getIndices().stream().map(Index::getName).forEach(index -> { + stats.backingIndices.add(index); + allBackingIndices.add(index); + }); + dataStream.getFailureIndices().getIndices().stream().map(Index::getName).forEach(index -> { stats.backingIndices.add(index); allBackingIndices.add(index); }); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 7d2828e30d5ab..7de3f180753f8 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -33,7 +33,7 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -49,6 +49,9 @@ import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import 
org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; @@ -944,11 +947,6 @@ private Set maybeExecuteForceMerge(ClusterState state, List indice if ((configuredFloorSegmentMerge == null || configuredFloorSegmentMerge.equals(targetMergePolicyFloorSegment) == false) || (configuredMergeFactor == null || configuredMergeFactor.equals(targetMergePolicyFactor) == false)) { UpdateSettingsRequest updateMergePolicySettingsRequest = new UpdateSettingsRequest(); - updateMergePolicySettingsRequest.indicesOptions( - IndicesOptions.builder(updateMergePolicySettingsRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) - .build() - ); updateMergePolicySettingsRequest.indices(indexName); updateMergePolicySettingsRequest.settings( Settings.builder() @@ -998,8 +996,11 @@ private Set maybeExecuteForceMerge(ClusterState state, List indice private void rolloverDataStream(String writeIndexName, RolloverRequest rolloverRequest, ActionListener listener) { // "saving" the rollover target name here so we don't capture the entire request - String rolloverTarget = rolloverRequest.getRolloverTarget(); - logger.trace("Data stream lifecycle issues rollover request for data stream [{}]", rolloverTarget); + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + logger.trace("Data stream lifecycle issues rollover request for data stream [{}]", rolloverRequest.getRolloverTarget()); client.admin().indices().rolloverIndex(rolloverRequest, new ActionListener<>() { @Override public void onResponse(RolloverResponse rolloverResponse) { @@ -1014,7 +1015,7 @@ public void onResponse(RolloverResponse rolloverResponse) { logger.info( "Data stream lifecycle successfully rolled over datastream [{}] due to the following met rollover " + "conditions {}. 
The new index is [{}]", - rolloverTarget, + rolloverRequest.getRolloverTarget(), metConditions, rolloverResponse.getNewIndex() ); @@ -1024,7 +1025,7 @@ public void onResponse(RolloverResponse rolloverResponse) { @Override public void onFailure(Exception e) { - DataStream dataStream = clusterService.state().metadata().dataStreams().get(rolloverTarget); + DataStream dataStream = clusterService.state().metadata().dataStreams().get(resolvedRolloverTarget.resource()); if (dataStream == null || dataStream.getWriteIndex().getName().equals(writeIndexName) == false) { // the data stream has another write index so no point in recording an error for the previous write index we were // attempting to roll over @@ -1407,9 +1408,7 @@ static RolloverRequest getDefaultRolloverRequest( ) { RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null).masterNodeTimeout(TimeValue.MAX_VALUE); if (rolloverFailureStore) { - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() - ); + rolloverRequest.setRolloverTarget(IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES)); } rolloverRequest.setConditions(rolloverConfiguration.resolveRolloverConditions(dataRetention)); return rolloverRequest; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java index 1595348649528..7992362d791b1 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java @@ -50,7 +50,7 @@ public static final class Request extends AcknowledgedRequest implement .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(false) + .allowSelectors(false) .build() ) .build(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java index 98a29dd636ddf..860bcb5bf2fbe 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java @@ -39,7 +39,9 @@ public static final class Request extends AcknowledgedRequest implement .wildcardOptions( IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false) ) - .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true)) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false) + ) .build(); public Request(StreamInput in) throws IOException { diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java index c1354da1129ca..45bda1abd5c02 100644 --- 
a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java @@ -50,7 +50,9 @@ public static class Request extends MasterNodeReadRequest implements In .wildcardOptions( IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false) ) - .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true)) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false) + ) .build(); private boolean includeDefaults = false; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java index d055a6972312a..d66b45665d4e2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java @@ -71,7 +71,9 @@ public static Request parseRequest(XContentParser parser, Factory factory) { .wildcardOptions( IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false) ) - .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true)) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false) + ) .build(); private final DataStreamOptions options; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java index b61e38297397d..be157608b1c3f 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.datastreams.GetDataStreamAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.set.Sets; @@ -42,8 +41,7 @@ public class RestGetDataStreamsAction extends BaseRestHandler { IndicesOptions.WildcardOptions.ALLOW_NO_INDICES, IndicesOptions.GatekeeperOptions.IGNORE_THROTTLED, "verbose" - ), - DataStream.isFailureStoreFeatureFlagEnabled() ? 
Set.of(IndicesOptions.FAILURE_STORE_QUERY_PARAM) : Set.of() + ) ) ); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java index d5c5193948213..e32636fe40d84 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.DataStreamsStatsAction; import org.elasticsearch.action.datastreams.DeleteDataStreamAction; @@ -22,8 +23,12 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStreamFailureStore; +import org.elasticsearch.cluster.metadata.DataStreamOptions; +import org.elasticsearch.cluster.metadata.ResettableValue; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xcontent.json.JsonXContent; @@ -40,12 +45,14 @@ import static java.lang.Math.max; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; public class DataStreamsStatsTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return List.of(DataStreamsPlugin.class); + return List.of(DataStreamsPlugin.class, MapperExtrasPlugin.class); } private final Set createdDataStreams = new HashSet<>(); @@ -107,8 +114,30 @@ public void testStatsExistingDataStream() throws Exception { assertEquals(stats.getTotalStoreSize().getBytes(), stats.getDataStreams()[0].getStoreSize().getBytes()); } + public void testStatsExistingDataStreamWithFailureStores() throws Exception { + String dataStreamName = createDataStream(false, true); + createFailedDocument(dataStreamName); + + DataStreamsStatsAction.Response stats = getDataStreamsStats(); + + assertEquals(2, stats.getSuccessfulShards()); + assertEquals(0, stats.getFailedShards()); + assertEquals(1, stats.getDataStreamCount()); + assertEquals(2, stats.getBackingIndices()); + assertNotEquals(0L, stats.getTotalStoreSize().getBytes()); + assertEquals(1, stats.getDataStreams().length); + assertEquals(dataStreamName, stats.getDataStreams()[0].getDataStream()); + assertEquals(2, stats.getDataStreams()[0].getBackingIndices()); + // The timestamp is going to not be something we can validate because + // it captures the time of failure which is uncontrolled in the test + // Just make sure it exists by ensuring it isn't zero + assertThat(stats.getDataStreams()[0].getMaximumTimestamp(), is(greaterThan(0L))); + assertNotEquals(0L, stats.getDataStreams()[0].getStoreSize().getBytes()); + 
assertEquals(stats.getTotalStoreSize().getBytes(), stats.getDataStreams()[0].getStoreSize().getBytes()); + } + public void testStatsExistingHiddenDataStream() throws Exception { - String dataStreamName = createDataStream(true); + String dataStreamName = createDataStream(true, false); long timestamp = createDocument(dataStreamName); DataStreamsStatsAction.Response stats = getDataStreamsStats(true); @@ -221,14 +250,19 @@ public void testStatsMultipleDataStreams() throws Exception { } private String createDataStream() throws Exception { - return createDataStream(false); + return createDataStream(false, false); } - private String createDataStream(boolean hidden) throws Exception { + private String createDataStream(boolean hidden, boolean failureStore) throws Exception { String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.getDefault()); + ResettableValue failureStoreOptions = failureStore == false + ? ResettableValue.undefined() + : ResettableValue.create( + new DataStreamOptions.Template(ResettableValue.create(new DataStreamFailureStore.Template(ResettableValue.create(true)))) + ); Template idxTemplate = new Template(null, new CompressedXContent(""" {"properties":{"@timestamp":{"type":"date"},"data":{"type":"keyword"}}} - """), null); + """), null, null, failureStoreOptions); ComposableIndexTemplate template = ComposableIndexTemplate.builder() .indexPatterns(List.of(dataStreamName + "*")) .template(idxTemplate) @@ -269,6 +303,27 @@ private long createDocument(String dataStreamName) throws Exception { return timestamp; } + private long createFailedDocument(String dataStreamName) throws Exception { + // Get some randomized but reasonable timestamps on the data since not all of it is guaranteed to arrive in order. + long timeSeed = System.currentTimeMillis(); + long timestamp = randomLongBetween(timeSeed - TimeUnit.HOURS.toMillis(5), timeSeed); + client().bulk( + new BulkRequest(dataStreamName).add( + new IndexRequest().opType(DocWriteRequest.OpType.CREATE) + .source( + JsonXContent.contentBuilder() + .startObject() + .field("@timestamp", timestamp) + .object("data", b -> b.field("garbage", randomAlphaOfLength(25))) + .endObject() + ) + ) + ).get(); + indicesAdmin().refresh(new RefreshRequest(".fs-" + dataStreamName + "*").indicesOptions(IndicesOptions.lenientExpandOpenHidden())) + .get(); + return timestamp; + } + private DataStreamsStatsAction.Response getDataStreamsStats() throws Exception { return getDataStreamsStats(false); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 698ab427ab040..ac7dabd868a3f 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; @@ -46,6 +46,7 @@ import 
org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -225,11 +226,12 @@ public void testOperationsExecutedOnce() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); - assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES)); + assertThat( + rolloverFailureIndexRequest.getRolloverTarget(), + is(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.FAILURES)) + ); List deleteRequests = clientSeenRequests.subList(2, 5) .stream() .map(transportRequest -> (DeleteIndexRequest) transportRequest) @@ -1546,11 +1548,12 @@ public void testFailureStoreIsManagedEvenWhenDisabled() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); - assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES)); + assertThat( + rolloverFailureIndexRequest.getRolloverTarget(), + is(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.FAILURES)) + ); assertThat( ((DeleteIndexRequest) clientSeenRequests.get(2)).indices()[0], is(dataStream.getFailureIndices().getIndices().get(0).getName()) diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml index 13f79e95d99f4..f439cf59bf2d3 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml @@ -148,8 +148,7 @@ # rollover data stream to create new failure store index - do: indices.rollover: - alias: "data-stream-for-modification" - target_failure_store: true + alias: "data-stream-for-modification::failures" - is_true: acknowledged # save index names for later use diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml 
b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml index cc3a11ffde5e8..51a1e96b1e937 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml @@ -9,7 +9,7 @@ setup: capabilities: [ 'failure_store_in_template' ] - method: POST path: /{index}/_rollover - capabilities: [ 'lazy-rollover-failure-store' ] + capabilities: [ 'lazy-rollover-failure-store', 'index-expression-selectors' ] - do: allowed_warnings: @@ -58,8 +58,7 @@ teardown: - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } @@ -92,8 +91,7 @@ teardown: - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" body: conditions: max_docs: 1 @@ -130,8 +128,7 @@ teardown: - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" body: conditions: max_primary_shard_docs: 2 @@ -165,8 +162,7 @@ teardown: # Mark the failure store for lazy rollover - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" lazy: true - match: { acknowledged: true } @@ -263,8 +259,7 @@ teardown: # Mark the failure store for lazy rollover - do: indices.rollover: - alias: data-stream-for-lazy-rollover - target_failure_store: true + alias: data-stream-for-lazy-rollover::failures lazy: true - match: { acknowledged: true } @@ -332,8 +327,7 @@ teardown: # Mark the failure store for lazy rollover - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" lazy: true - match: { acknowledged: true } @@ -377,16 +371,14 @@ teardown: - do: catch: /Rolling over\/initializing an empty failure store is only supported without conditions\./ indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" body: conditions: max_docs: 1 - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "_none_" } @@ -424,8 +416,7 @@ teardown: # Initializing should work - do: indices.rollover: - alias: "other-data-stream-for-rollover" - target_failure_store: true + alias: "other-data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "_none_" } @@ -448,8 +439,7 @@ teardown: # And "regular" rollover should work - do: indices.rollover: - alias: "other-data-stream-for-rollover" - target_failure_store: true + alias: "other-data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "/\\.fs-other-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java index 766c3ff695f84..bcc6177f8363c 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java +++ 
b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java @@ -113,14 +113,20 @@ private static SSLContext buildServerSslContext() throws Exception { } public void testClientFailsWithUntrustedCertificate() throws IOException { - assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); final List threads = new ArrayList<>(); final Settings.Builder builder = Settings.builder().put("path.home", createTempDir()); final Settings settings = builder.build(); final Environment environment = TestEnvironment.newEnvironment(settings); final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class)); try (RestClient client = Reindexer.buildRestClient(getRemoteInfo(), ssl, 1L, threads)) { - expectThrows(SSLHandshakeException.class, () -> client.performRequest(new Request("GET", "/"))); + if (inFipsJvm()) { + // Bouncy Castle throws a different exception + IOException exception = expectThrows(IOException.class, () -> client.performRequest(new Request("GET", "/"))); + assertThat(exception.getCause(), Matchers.instanceOf(javax.net.ssl.SSLException.class)); + } else { + expectThrows(SSLHandshakeException.class, () -> client.performRequest(new Request("GET", "/"))); + + } } } diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index e63b1629db39c..5bbade8cf6fce 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -50,8 +50,6 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> nonInputProperties.systemProperty('tests.rest.cluster', localCluster.map(c -> c.allHttpSocketURI.join(","))) nonInputProperties.systemProperty('tests.rest.remote_cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(","))) } - - onlyIf("FIPS mode disabled") { buildParams.inFipsJvm == false } } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json index 299c24f987d8d..47a1bee665506 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json @@ -63,12 +63,6 @@ "type":"boolean", "default":"false", "description":"If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams." - }, - "target_failure_store":{ - "type":"boolean", - "description":"If set to true, the rollover action will be applied on the failure store of the data stream.", - "visibility": "feature_flag", - "feature_flag": "es.failure_store_feature_flag_enabled" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json new file mode 100644 index 0000000000000..e17a69a77b252 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json @@ -0,0 +1,37 @@ +{ + "migrate.create_from":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html", + "description":"This API creates a destination from a source index. 
It copies the mappings and settings from the source index while allowing request settings and mappings to override the source values." + }, + "stability":"experimental", + "visibility":"private", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_create_from/{source}/{dest}", + "methods":[ "PUT", "POST"], + "parts":{ + "source":{ + "type":"string", + "description":"The source index name" + }, + "dest":{ + "type":"string", + "description":"The destination index name" + } + } + } + ] + }, + "body":{ + "description":"The body contains the fields `mappings_override` and `settings_override`.", + "required":false + } + } +} + diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index c5c6a56e52f18..c5bb47ce1e4f7 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -153,7 +153,8 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_ENABLE_NODE_LEVEL_REDUCTION = def(8_818_00_0); public static final TransportVersion JINA_AI_INTEGRATION_ADDED = def(8_819_00_0); public static final TransportVersion TRACK_INDEX_FAILED_DUE_TO_VERSION_CONFLICT_METRIC = def(8_820_00_0); - public static final TransportVersion ELASTIC_INFERENCE_SERVICE_UNIFIED_CHAT_COMPLETIONS_INTEGRATION = def(8_821_00_0); + public static final TransportVersion REPLACE_FAILURE_STORE_OPTIONS_WITH_SELECTOR_SYNTAX = def(8_821_00_0); + public static final TransportVersion ELASTIC_INFERENCE_SERVICE_UNIFIED_CHAT_COMPLETIONS_INTEGRATION = def(8_822_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 03e05ca0e4247..24c427c32d69a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -44,9 +43,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 801dbbdee0858..be7aaeec8f69e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.info.ClusterInfoRequest; -import 
org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; @@ -95,13 +94,7 @@ public static Feature[] fromRequest(RestRequest request) { private transient boolean includeDefaults = false; public GetIndexRequest() { - super( - DataStream.isFailureStoreFeatureFlagEnabled() - ? IndicesOptions.builder(IndicesOptions.strictExpandOpen()) - .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) - .build() - : IndicesOptions.strictExpandOpen() - ); + super(IndicesOptions.strictExpandOpen()); } public GetIndexRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 7b782c6da5a84..05cc0d2cf05d8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -82,7 +82,7 @@ public class PutMappingRequest extends AcknowledgedRequest im .allowClosedIndices(true) .allowAliasToMultipleIndices(true) .ignoreThrottled(false) - .allowFailureIndices(false) + .allowSelectors(false) ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index 749470e181deb..24f8735b6bd7f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -20,6 +20,8 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataMappingService; import org.elasticsearch.cluster.service.ClusterService; @@ -40,6 +42,7 @@ import java.util.List; import java.util.Objects; import java.util.Optional; +import java.util.SortedMap; /** * Put mapping action. @@ -106,7 +109,14 @@ protected void masterOperation( return; } - final String message = checkForSystemIndexViolations(systemIndices, concreteIndices, request); + String message = checkForFailureStoreViolations(clusterService.state(), concreteIndices, request); + if (message != null) { + logger.warn(message); + listener.onFailure(new IllegalStateException(message)); + return; + } + + message = checkForSystemIndexViolations(systemIndices, concreteIndices, request); if (message != null) { logger.warn(message); listener.onFailure(new IllegalStateException(message)); @@ -172,6 +182,33 @@ static void performMappingUpdate( metadataMappingService.putMapping(updateRequest, wrappedListener); } + static String checkForFailureStoreViolations(ClusterState clusterState, Index[] concreteIndices, PutMappingRequest request) { + // Requests that a cluster generates itself are permitted to make changes to mappings + // so that rolling upgrade scenarios still work. We check this via the request's origin. 
+ if (Strings.isNullOrEmpty(request.origin()) == false) { + return null; + } + + List violations = new ArrayList<>(); + SortedMap indicesLookup = clusterState.metadata().getIndicesLookup(); + for (Index index : concreteIndices) { + IndexAbstraction indexAbstraction = indicesLookup.get(index.getName()); + if (indexAbstraction != null) { + DataStream maybeDataStream = indexAbstraction.getParentDataStream(); + if (maybeDataStream != null && maybeDataStream.isFailureStoreIndex(index.getName())) { + violations.add(index.getName()); + } + } + } + + if (violations.isEmpty() == false) { + return "Cannot update mappings in " + + violations + + ": mappings for indices contained in data stream failure stores cannot be updated"; + } + return null; + } + static String checkForSystemIndexViolations(SystemIndices systemIndices, Index[] concreteIndices, PutMappingRequest request) { // Requests that a cluster generates itself are permitted to have a difference in mappings // so that rolling upgrade scenarios still work. We check this via the request's origin. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index f5c100b7884bb..4aa022aff1c80 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -59,6 +59,7 @@ import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; +import java.util.stream.Stream; import static org.elasticsearch.action.search.TransportSearchHelper.checkCCSVersionCompatibility; @@ -598,12 +599,13 @@ private static void mergeResults( private static void enrichIndexAbstraction( ClusterState clusterState, - ResolvedExpression indexAbstraction, + ResolvedExpression resolvedExpression, List indices, List aliases, List dataStreams ) { - IndexAbstraction ia = clusterState.metadata().getIndicesLookup().get(indexAbstraction.resource()); + SortedMap indicesLookup = clusterState.metadata().getIndicesLookup(); + IndexAbstraction ia = indicesLookup.get(resolvedExpression.resource()); if (ia != null) { switch (ia.getType()) { case CONCRETE_INDEX -> { @@ -632,13 +634,24 @@ private static void enrichIndexAbstraction( ); } case ALIAS -> { - String[] indexNames = ia.getIndices().stream().map(Index::getName).toArray(String[]::new); + String[] indexNames = getAliasIndexStream(resolvedExpression, ia, indicesLookup).map(Index::getName) + .toArray(String[]::new); Arrays.sort(indexNames); aliases.add(new ResolvedAlias(ia.getName(), indexNames)); } case DATA_STREAM -> { DataStream dataStream = (DataStream) ia; - String[] backingIndices = dataStream.getIndices().stream().map(Index::getName).toArray(String[]::new); + Stream dataStreamIndices = resolvedExpression.selector() == null + ? 
dataStream.getIndices().stream() + : switch (resolvedExpression.selector()) { + case DATA -> dataStream.getBackingIndices().getIndices().stream(); + case FAILURES -> dataStream.getFailureIndices().getIndices().stream(); + case ALL_APPLICABLE -> Stream.concat( + dataStream.getBackingIndices().getIndices().stream(), + dataStream.getFailureIndices().getIndices().stream() + ); + }; + String[] backingIndices = dataStreamIndices.map(Index::getName).toArray(String[]::new); dataStreams.add(new ResolvedDataStream(dataStream.getName(), backingIndices, DataStream.TIMESTAMP_FIELD_NAME)); } default -> throw new IllegalStateException("unknown index abstraction type: " + ia.getType()); @@ -646,6 +659,52 @@ private static void enrichIndexAbstraction( } } + private static Stream getAliasIndexStream( + ResolvedExpression resolvedExpression, + IndexAbstraction ia, + SortedMap indicesLookup + ) { + Stream aliasIndices; + if (resolvedExpression.selector() == null) { + aliasIndices = ia.getIndices().stream(); + } else { + aliasIndices = switch (resolvedExpression.selector()) { + case DATA -> ia.getIndices().stream(); + case FAILURES -> { + assert ia.isDataStreamRelated() : "Illegal selector [failures] used on non data stream alias"; + yield ia.getIndices() + .stream() + .map(Index::getName) + .map(indicesLookup::get) + .map(IndexAbstraction::getParentDataStream) + .filter(Objects::nonNull) + .distinct() + .map(DataStream::getFailureIndices) + .flatMap(failureIndices -> failureIndices.getIndices().stream()); + } + case ALL_APPLICABLE -> { + if (ia.isDataStreamRelated()) { + yield Stream.concat( + ia.getIndices().stream(), + ia.getIndices() + .stream() + .map(Index::getName) + .map(indicesLookup::get) + .map(IndexAbstraction::getParentDataStream) + .filter(Objects::nonNull) + .distinct() + .map(DataStream::getFailureIndices) + .flatMap(failureIndices -> failureIndices.getIndices().stream()) + ); + } else { + yield ia.getIndices().stream(); + } + } + }; + } + return aliasIndices; + } + enum Attribute { OPEN, CLOSED, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java index a677897d79633..7b28acdbd8f84 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java @@ -21,6 +21,8 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; @@ -119,32 +121,38 @@ protected void masterOperation( : "The auto rollover action does not expect any other parameters in the request apart from the data stream name"; Metadata metadata = clusterState.metadata(); - DataStream dataStream = metadata.dataStreams().get(rolloverRequest.getRolloverTarget()); + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean isFailureStoreRollover = 
resolvedRolloverTarget.selector() != null + && resolvedRolloverTarget.selector().shouldIncludeFailures(); + + DataStream dataStream = metadata.dataStreams().get(resolvedRolloverTarget.resource()); // Skip submitting the task if we detect that the lazy rollover has been already executed. - if (isLazyRolloverNeeded(dataStream, rolloverRequest.targetsFailureStore()) == false) { - DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(rolloverRequest.targetsFailureStore()); + if (isLazyRolloverNeeded(dataStream, isFailureStoreRollover) == false) { + DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(isFailureStoreRollover); listener.onResponse(noopLazyRolloverResponse(targetIndices)); return; } // We evaluate the names of the source index as well as what our newly created index would be. final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.targetsFailureStore() + isFailureStoreRollover ); final String trialSourceIndexName = trialRolloverNames.sourceName(); final String trialRolloverIndexName = trialRolloverNames.rolloverName(); MetadataCreateIndexService.validateIndexName(trialRolloverIndexName, clusterState.metadata(), clusterState.routingTable()); - assert metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()) : "Auto-rollover applies only to data streams"; + assert metadata.dataStreams().containsKey(resolvedRolloverTarget.resource()) : "Auto-rollover applies only to data streams"; String source = "lazy_rollover source [" + trialSourceIndexName + "] to target [" + trialRolloverIndexName + "]"; // We create a new rollover request to ensure that it doesn't contain any other parameters apart from the data stream name // This will provide a more resilient user experience - var newRolloverRequest = new RolloverRequest(rolloverRequest.getRolloverTarget(), null); - newRolloverRequest.setIndicesOptions(rolloverRequest.indicesOptions()); + var newRolloverRequest = new RolloverRequest(resolvedRolloverTarget.combined(), null); LazyRolloverTask rolloverTask = new LazyRolloverTask(newRolloverRequest, listener); lazyRolloverTaskQueue.submitTask(source, rolloverTask, rolloverRequest.masterNodeTimeout()); } @@ -223,12 +231,19 @@ public ClusterState executeTask( AllocationActionMultiListener allocationActionMultiListener ) throws Exception { + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean isFailureStoreRollover = resolvedRolloverTarget.selector() != null + && resolvedRolloverTarget.selector().shouldIncludeFailures(); + // If the data stream has been rolled over since it was marked for lazy rollover, this operation is a noop - final DataStream dataStream = currentState.metadata().dataStreams().get(rolloverRequest.getRolloverTarget()); + final DataStream dataStream = currentState.metadata().dataStreams().get(resolvedRolloverTarget.resource()); assert dataStream != null; - if (isLazyRolloverNeeded(dataStream, rolloverRequest.targetsFailureStore()) == false) { - final DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(rolloverRequest.targetsFailureStore()); + if (isLazyRolloverNeeded(dataStream, isFailureStoreRollover) == false) { + final 
DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(isFailureStoreRollover); var noopResponse = noopLazyRolloverResponse(targetIndices); notifyAllListeners(rolloverTaskContexts, context -> context.getTask().listener.onResponse(noopResponse)); return currentState; @@ -237,7 +252,7 @@ public ClusterState executeTask( // Perform the actual rollover final var rolloverResult = rolloverService.rolloverClusterState( currentState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), List.of(), @@ -246,7 +261,7 @@ public ClusterState executeTask( false, null, null, - rolloverRequest.targetsFailureStore() + isFailureStoreRollover ); results.add(rolloverResult); logger.trace("lazy rollover result [{}]", rolloverResult); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 552ce727d4249..608d32d50a856 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -16,7 +16,8 @@ import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.MapperService; @@ -81,7 +82,7 @@ public class RolloverRequest extends AcknowledgedRequest implem private RolloverConditions conditions = new RolloverConditions(); // the index name "_na_" is never read back, what matters are settings, mappings and aliases private CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); - private IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + private IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosedAllowSelectors(); public RolloverRequest(StreamInput in) throws IOException { super(in); @@ -125,12 +126,15 @@ public ActionRequestValidationException validate() { ); } - var selector = indicesOptions.selectorOptions().defaultSelector(); - if (selector == IndexComponentSelector.ALL_APPLICABLE) { - validationException = addValidationError( - "rollover cannot be applied to both regular and failure indices at the same time", - validationException - ); + if (rolloverTarget != null) { + ResolvedExpression resolvedExpression = SelectorResolver.parseExpression(rolloverTarget, indicesOptions); + IndexComponentSelector selector = resolvedExpression.selector(); + if (IndexComponentSelector.ALL_APPLICABLE.equals(selector)) { + validationException = addValidationError( + "rollover cannot be applied to both regular and failure indices at the same time", + validationException + ); + } } return validationException; @@ -162,13 +166,6 @@ public IndicesOptions indicesOptions() { return indicesOptions; } - /** - * @return true of the rollover request targets the failure store, false otherwise. 
- */ - public boolean targetsFailureStore() { - return DataStream.isFailureStoreFeatureFlagEnabled() && indicesOptions.includeFailureIndices(); - } - public void setIndicesOptions(IndicesOptions indicesOptions) { this.indicesOptions = indicesOptions; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index c5c874f9bcddf..4f0aa9c5bade4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -36,6 +36,8 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadataStats; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; @@ -149,8 +151,7 @@ protected ClusterBlockException checkBlock(RolloverRequest request, ClusterState .matchOpen(request.indicesOptions().expandWildcardsOpen()) .matchClosed(request.indicesOptions().expandWildcardsClosed()) .build(), - IndicesOptions.GatekeeperOptions.DEFAULT, - request.indicesOptions().selectorOptions() + IndicesOptions.GatekeeperOptions.DEFAULT ); return state.blocks() @@ -170,11 +171,18 @@ protected void masterOperation( assert task instanceof CancellableTask; Metadata metadata = clusterState.metadata(); + + // Parse the rollover request's target since the expression it may contain a selector on it + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean targetFailureStore = resolvedRolloverTarget.selector() != null && resolvedRolloverTarget.selector().shouldIncludeFailures(); + // We evaluate the names of the index for which we should evaluate conditions, as well as what our newly created index *would* be. 
- boolean targetFailureStore = rolloverRequest.targetsFailureStore(); final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), targetFailureStore @@ -183,7 +191,7 @@ protected void masterOperation( final String trialRolloverIndexName = trialRolloverNames.rolloverName(); MetadataCreateIndexService.validateIndexName(trialRolloverIndexName, metadata, clusterState.routingTable()); - boolean isDataStream = metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()); + boolean isDataStream = metadata.dataStreams().containsKey(resolvedRolloverTarget.resource()); if (rolloverRequest.isLazy()) { if (isDataStream == false || rolloverRequest.getConditions().hasConditions()) { String message; @@ -201,7 +209,7 @@ protected void masterOperation( } if (rolloverRequest.isDryRun() == false) { metadataDataStreamsService.setRolloverOnWrite( - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), true, targetFailureStore, rolloverRequest.ackTimeout(), @@ -225,7 +233,7 @@ protected void masterOperation( final IndexAbstraction rolloverTargetAbstraction = clusterState.metadata() .getIndicesLookup() - .get(rolloverRequest.getRolloverTarget()); + .get(resolvedRolloverTarget.resource()); if (rolloverTargetAbstraction.getType() == IndexAbstraction.Type.ALIAS && rolloverTargetAbstraction.isDataStreamRelated()) { listener.onFailure( new IllegalStateException("Aliases to data streams cannot be rolled over. Please rollover the data stream itself.") @@ -246,10 +254,10 @@ protected void masterOperation( final var statsIndicesOptions = new IndicesOptions( IndicesOptions.ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS, IndicesOptions.WildcardOptions.builder().matchClosed(true).allowEmptyExpressions(false).build(), - IndicesOptions.GatekeeperOptions.DEFAULT, - rolloverRequest.indicesOptions().selectorOptions() + IndicesOptions.GatekeeperOptions.DEFAULT ); - IndicesStatsRequest statsRequest = new IndicesStatsRequest().indices(rolloverRequest.getRolloverTarget()) + // Make sure to recombine any selectors on the stats request + IndicesStatsRequest statsRequest = new IndicesStatsRequest().indices(resolvedRolloverTarget.combined()) .clear() .indicesOptions(statsIndicesOptions) .docs(true) @@ -266,9 +274,7 @@ protected void masterOperation( listener.delegateFailureAndWrap((delegate, statsResponse) -> { AutoShardingResult rolloverAutoSharding = null; - final IndexAbstraction indexAbstraction = clusterState.metadata() - .getIndicesLookup() - .get(rolloverRequest.getRolloverTarget()); + final IndexAbstraction indexAbstraction = clusterState.metadata().getIndicesLookup().get(resolvedRolloverTarget.resource()); if (indexAbstraction.getType().equals(IndexAbstraction.Type.DATA_STREAM)) { DataStream dataStream = (DataStream) indexAbstraction; final Optional indexStats = Optional.ofNullable(statsResponse) @@ -492,14 +498,20 @@ public ClusterState executeTask( ) throws Exception { final var rolloverTask = rolloverTaskContext.getTask(); final var rolloverRequest = rolloverTask.rolloverRequest(); + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean targetFailureStore = resolvedRolloverTarget.selector() != null + && resolvedRolloverTarget.selector().shouldIncludeFailures(); // 
Regenerate the rollover names, as a rollover could have happened in between the pre-check and the cluster state update final var rolloverNames = MetadataRolloverService.resolveRolloverNames( currentState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.targetsFailureStore() + targetFailureStore ); // Re-evaluate the conditions, now with our final source index name @@ -532,7 +544,7 @@ public ClusterState executeTask( final IndexAbstraction rolloverTargetAbstraction = currentState.metadata() .getIndicesLookup() - .get(rolloverRequest.getRolloverTarget()); + .get(resolvedRolloverTarget.resource()); final IndexMetadataStats sourceIndexStats = rolloverTargetAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM ? IndexMetadataStats.fromStatsResponse(rolloverSourceIndex, rolloverTask.statsResponse()) @@ -541,7 +553,7 @@ public ClusterState executeTask( // Perform the actual rollover final var rolloverResult = rolloverService.rolloverClusterState( currentState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), metConditions, @@ -550,7 +562,7 @@ public ClusterState executeTask( false, sourceIndexStats, rolloverTask.autoShardingResult(), - rolloverRequest.targetsFailureStore() + targetFailureStore ); results.add(rolloverResult); logger.trace("rollover result [{}]", rolloverResult); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index b137809047d18..dd473869fb2d9 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.client.internal.node.NodeClient; @@ -216,11 +216,9 @@ private void rollOverFailureStores(Runnable runnable) { } try (RefCountingRunnable refs = new RefCountingRunnable(runnable)) { for (String dataStream : failureStoresToBeRolledOver) { - RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null); - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) - .build() + RolloverRequest rolloverRequest = new RolloverRequest( + IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES), + null ); // We are executing a lazy rollover because it is an action specialised for this situation, when we want an // unconditional and performant rollover. 
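The rollover call sites above (LazyRolloverAction, TransportRolloverAction, BulkOperation) all follow the same pattern: parse any selector off the target expression, operate on the bare resource name, and recombine only when a follow-up request still needs a single expression string. Below is a minimal sketch of that flow, assuming the signatures shown at those call sites; the helper class name and the "logs-app" expression are illustrative and not part of this change.

import org.elasticsearch.action.support.IndexComponentSelector;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver;

class SelectorSketch {
    // Returns true when an expression such as "logs-app::failures" targets the failure store.
    static boolean targetsFailureStore(String targetExpression, IndicesOptions options) {
        ResolvedExpression resolved = SelectorResolver.parseExpression(targetExpression, options);
        return resolved.selector() != null && resolved.selector().shouldIncludeFailures();
    }

    // Builds the failure-store expression for a plain data stream name, e.g. "logs-app" -> a ::failures target.
    static String failuresTarget(String dataStreamName) {
        return IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.FAILURES);
    }
}

When the parsed target has to be forwarded unchanged (for example to the IndicesStatsRequest or the new lazy RolloverRequest above), the call sites use resolved.combined() rather than resource(), so a selector supplied by the caller is preserved on the follow-up request.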
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 65264faf50129..2a6a789d9d312 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -25,7 +25,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -425,11 +425,7 @@ private void rollOverDataStreams( RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null); rolloverRequest.masterNodeTimeout(bulkRequest.timeout); if (targetFailureStore) { - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) - .build() - ); + rolloverRequest.setRolloverTarget(IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES)); } // We are executing a lazy rollover because it is an action specialised for this situation, when we want an // unconditional and performant rollover. @@ -438,9 +434,8 @@ private void rollOverDataStreams( @Override public void onResponse(RolloverResponse result) { logger.debug( - "Data stream{} {} has {} over, the latest index is {}", - rolloverRequest.targetsFailureStore() ? " failure store" : "", - dataStream, + "Data stream [{}] has {} over, the latest index is {}", + rolloverRequest.getRolloverTarget(), result.isRolledOver() ? 
"been successfully rolled" : "skipped rolling", result.getNewIndex() ); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java index a0a05138406c5..62caba8f7ed96 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java @@ -9,16 +9,18 @@ package org.elasticsearch.action.datastreams; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.index.Index; +import java.util.ArrayList; import java.util.List; import java.util.SortedMap; -import java.util.stream.Stream; public class DataStreamsActionUtil { @@ -47,25 +49,79 @@ public static IndicesOptions updateIndicesOptions(IndicesOptions indicesOptions) return indicesOptions; } - public static Stream resolveConcreteIndexNames( + public static List resolveConcreteIndexNames( IndexNameExpressionResolver indexNameExpressionResolver, ClusterState clusterState, String[] names, IndicesOptions indicesOptions ) { - List abstractionNames = getDataStreamNames(indexNameExpressionResolver, clusterState, names, indicesOptions); + List abstractionNames = indexNameExpressionResolver.dataStreams( + clusterState, + updateIndicesOptions(indicesOptions), + names + ); SortedMap indicesLookup = clusterState.getMetadata().getIndicesLookup(); - return abstractionNames.stream().flatMap(abstractionName -> { + List results = new ArrayList<>(abstractionNames.size()); + for (ResolvedExpression abstractionName : abstractionNames) { + IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName.resource()); + assert indexAbstraction != null; + if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { + selectDataStreamIndicesNames( + (DataStream) indexAbstraction, + IndexComponentSelector.FAILURES.equals(abstractionName.selector()), + results + ); + } + } + return results; + } + + /** + * Resolves a list of expressions into data stream names and then collects the concrete indices + * that are applicable for those data streams based on the selector provided in the arguments. 
+ * @param indexNameExpressionResolver resolver object + * @param clusterState state to query + * @param names data stream expressions + * @param selector which component indices of the data stream should be returned + * @param indicesOptions options for expression resolution + * @return A stream of concrete index names that belong to the components specified + * on the data streams returned from the expressions given + */ + public static List resolveConcreteIndexNamesWithSelector( + IndexNameExpressionResolver indexNameExpressionResolver, + ClusterState clusterState, + String[] names, + IndexComponentSelector selector, + IndicesOptions indicesOptions + ) { + assert indicesOptions.allowSelectors() == false : "If selectors are enabled, use resolveConcreteIndexNames instead"; + List abstractionNames = indexNameExpressionResolver.dataStreamNames( + clusterState, + updateIndicesOptions(indicesOptions), + names + ); + SortedMap indicesLookup = clusterState.getMetadata().getIndicesLookup(); + + List results = new ArrayList<>(abstractionNames.size()); + for (String abstractionName : abstractionNames) { IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName); assert indexAbstraction != null; if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { - DataStream dataStream = (DataStream) indexAbstraction; - List indices = dataStream.getIndices(); - return indices.stream().map(Index::getName); - } else { - return Stream.empty(); + if (selector.shouldIncludeData()) { + selectDataStreamIndicesNames((DataStream) indexAbstraction, false, results); + } + if (selector.shouldIncludeFailures()) { + selectDataStreamIndicesNames((DataStream) indexAbstraction, true, results); + } } - }); + } + return results; + } + + private static void selectDataStreamIndicesNames(DataStream indexAbstraction, boolean failureStore, List accumulator) { + for (Index index : indexAbstraction.getDataStreamIndices(failureStore).getIndices()) { + accumulator.add(index.getName()); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java index 9266bae439b73..82afeec752378 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java @@ -38,8 +38,6 @@ public DataStreamsStatsAction() { public static class Request extends BroadcastRequest { public Request() { - // this doesn't really matter since data stream name resolution isn't affected by IndicesOptions and - // a data stream's backing indices are retrieved from its metadata super( null, IndicesOptions.builder() @@ -58,10 +56,9 @@ public Request() { .allowAliasToMultipleIndices(true) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) - .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build() ); } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java index 4f647d4f02884..640c88918ffc0 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java @@ -61,7 +61,7 @@ public static class Request extends MasterNodeRequest implements Indice 
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java
index 9266bae439b73..82afeec752378 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java
@@ -38,8 +38,6 @@ public DataStreamsStatsAction() {
     public static class Request extends BroadcastRequest {

         public Request() {
-            // this doesn't really matter since data stream name resolution isn't affected by IndicesOptions and
-            // a data stream's backing indices are retrieved from its metadata
             super(
                 null,
                 IndicesOptions.builder()
@@ -58,10 +56,9 @@ public Request() {
                         .allowAliasToMultipleIndices(true)
                         .allowClosedIndices(true)
                         .ignoreThrottled(false)
-                        .allowFailureIndices(true)
+                        .allowSelectors(false)
                         .build()
                     )
-                    .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE)
                     .build()
             );
         }
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java
index 4f647d4f02884..640c88918ffc0 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java
@@ -61,7 +61,7 @@ public static class Request extends MasterNodeRequest implements Indice
             .allowAliasToMultipleIndices(false)
             .allowClosedIndices(true)
             .ignoreThrottled(false)
-            .allowFailureIndices(true)
+            .allowSelectors(false)
             .build()
         )
         .build();
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java
index 883fc543749c2..c55957787aee7 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java
@@ -72,10 +72,11 @@ public static class Request extends MasterNodeReadRequest implements In
             .allowAliasToMultipleIndices(false)
             .allowClosedIndices(true)
             .ignoreThrottled(false)
-            .allowFailureIndices(true)
+            .allowSelectors(false)
             .build()
         )
         .build();
+
     private boolean includeDefaults = false;
     private boolean verbose = false;
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java
index a43d29501a7ee..401bd7a27c6fa 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java
@@ -63,7 +63,7 @@ public static class Request extends MasterNodeReadRequest implements In
             .allowAliasToMultipleIndices(false)
             .allowClosedIndices(true)
             .ignoreThrottled(false)
-            .allowFailureIndices(true)
+            .allowSelectors(false)
             .build()
         )
         .build();
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java
index b054d12890366..c2b7de8d5df8b 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java
@@ -94,7 +94,7 @@ public static Request parseRequest(XContentParser parser, Factory factory) {
             .allowAliasToMultipleIndices(false)
             .allowClosedIndices(true)
             .ignoreThrottled(false)
-            .allowFailureIndices(false)
+            .allowSelectors(false)
             .build()
         )
         .build();
diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java
index 62771230636c1..cce01aca7685a 100644
--- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java
+++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java
@@ -82,7 +82,7 @@ public String[] indices() {

     @Override
     public IndicesOptions indicesOptions() {
-        return IndicesOptions.STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED;
+        return IndicesOptions.strictSingleIndexNoExpandForbidClosed();
     }

     @Override
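A note on the pattern above: each of these request classes now lands on the same gatekeeper configuration, allowSelectors(false), in place of the old allowFailureIndices flag plus an options-level SelectorOptions default. The sketch below shows that shape in isolation; wildcard and concrete-target options are left at their builder defaults here, whereas the real request classes configure them explicitly.

    import org.elasticsearch.action.support.IndicesOptions;
    import org.elasticsearch.action.support.IndicesOptions.GatekeeperOptions;

    class SelectorFreeOptionsExample {
        // Mirrors the gatekeeper shape shared by the data stream requests in this change (sketch).
        static IndicesOptions selectorFree() {
            return IndicesOptions.builder()
                .gatekeeperOptions(
                    GatekeeperOptions.builder()
                        .allowAliasToMultipleIndices(false)
                        .allowClosedIndices(true)
                        .ignoreThrottled(false)
                        .allowSelectors(false)
                        .build()
                )
                .build();
        }
    }

With allowSelectors(false), an expression such as my-data-stream::failures is treated as a syntax error during resolution rather than quietly redirected, which is why these APIs opt out of selector support entirely.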
diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
index ebbd47336e3da..4231d598b2d70 100644
--- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
+++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
@@ -13,7 +13,6 @@ import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.logging.DeprecationCategory;
 import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.core.Nullable;
@@ -47,37 +46,13 @@ * @param gatekeeperOptions, applies to all the resolved indices and defines if throttled will be included and if certain type of
 *                           aliases or indices are allowed, or they will throw an error. It acts as a gatekeeper when an action
 *                           does not support certain options.
- * @param selectorOptions, applies to all resolved expressions, and it specifies the index component that should be included, if there
- *                         is no index component defined on the expression level.
 */
 public record IndicesOptions(
     ConcreteTargetOptions concreteTargetOptions,
     WildcardOptions wildcardOptions,
-    GatekeeperOptions gatekeeperOptions,
-    SelectorOptions selectorOptions
+    GatekeeperOptions gatekeeperOptions
 ) implements ToXContentFragment {

-    /**
-     * @deprecated this query param will be replaced by the selector `::` on the expression level
-     */
-    @Deprecated
-    public static final String FAILURE_STORE_QUERY_PARAM = "failure_store";
-    /**
-     * @deprecated this value will be replaced by the selector `::*` on the expression level
-     */
-    @Deprecated
-    public static final String INCLUDE_ALL = "include";
-    /**
-     * @deprecated this value will be replaced by the selector `::data` on the expression level
-     */
-    @Deprecated
-    public static final String INCLUDE_ONLY_REGULAR_INDICES = "exclude";
-    /**
-     * @deprecated this value will be replaced by the selector `::failures` on the expression level
-     */
-    @Deprecated
-    public static final String INCLUDE_ONLY_FAILURE_INDICES = "only";
-
     public static IndicesOptions.Builder builder() {
         return new Builder();
     }
@@ -324,14 +299,14 @@ public static Builder builder(WildcardOptions wildcardOptions) {
      *   - The ignoreThrottled flag, which is a deprecated flag that will filter out frozen indices.
      * @param allowAliasToMultipleIndices, allow aliases to multiple indices, true by default.
      * @param allowClosedIndices, allow closed indices, true by default.
-     * @param allowFailureIndices, allow failure indices in the response, true by default
+     * @param allowSelectors, allow selectors within index expressions, true by default.
      * @param ignoreThrottled, filters out throttled (aka frozen indices), defaults to true. This is deprecated and the only one
      *                         that only filters and never throws an error.
      */
     public record GatekeeperOptions(
         boolean allowAliasToMultipleIndices,
         boolean allowClosedIndices,
-        boolean allowFailureIndices,
+        boolean allowSelectors,
         @Deprecated boolean ignoreThrottled
     ) implements ToXContentFragment {

@@ -355,7 +330,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         public static class Builder {
             private boolean allowAliasToMultipleIndices;
             private boolean allowClosedIndices;
-            private boolean allowFailureIndices;
+            private boolean allowSelectors;
             private boolean ignoreThrottled;

             public Builder() {
@@ -365,7 +340,7 @@ public Builder() {
             Builder(GatekeeperOptions options) {
                 allowAliasToMultipleIndices = options.allowAliasToMultipleIndices;
                 allowClosedIndices = options.allowClosedIndices;
-                allowFailureIndices = options.allowFailureIndices;
+                allowSelectors = options.allowSelectors;
                 ignoreThrottled = options.ignoreThrottled;
             }

@@ -388,11 +363,12 @@ public Builder allowClosedIndices(boolean allowClosedIndices) {
             }

             /**
-             * Failure indices are accepted when true, otherwise the resolution will throw an error.
+             * Selectors are allowed within index expressions when true, otherwise the resolution will treat their presence as a syntax
+             * error when resolving index expressions.
              * Defaults to true.
              */
-            public Builder allowFailureIndices(boolean allowFailureIndices) {
-                this.allowFailureIndices = allowFailureIndices;
+            public Builder allowSelectors(boolean allowSelectors) {
+                this.allowSelectors = allowSelectors;
                 return this;
             }

@@ -405,7 +381,7 @@ public Builder ignoreThrottled(boolean ignoreThrottled) {
             }

             public GatekeeperOptions build() {
-                return new GatekeeperOptions(allowAliasToMultipleIndices, allowClosedIndices, allowFailureIndices, ignoreThrottled);
+                return new GatekeeperOptions(allowAliasToMultipleIndices, allowClosedIndices, allowSelectors, ignoreThrottled);
             }
         }
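To make the record-shape change concrete: with the selectorOptions component removed, IndicesOptions reduces to three components, so a direct construction mirrors the DEFAULT constant that appears further down in this file. A sketch, assuming the nested option types stay where they are today:

    import org.elasticsearch.action.support.IndicesOptions;
    import org.elasticsearch.action.support.IndicesOptions.ConcreteTargetOptions;
    import org.elasticsearch.action.support.IndicesOptions.GatekeeperOptions;
    import org.elasticsearch.action.support.IndicesOptions.WildcardOptions;

    class ThreeComponentExample {
        // The canonical record constructor now takes three components; selector behaviour is
        // expressed per expression (or via the allowSelectors gatekeeper flag), not as a fourth component.
        static final IndicesOptions OPTIONS = new IndicesOptions(
            ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS,
            WildcardOptions.DEFAULT,
            GatekeeperOptions.DEFAULT
        );
    }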
@@ -418,50 +394,6 @@ public static Builder builder(GatekeeperOptions gatekeeperOptions) {
         }
     }

-    /**
-     * Defines which selectors should be used by default for an index operation in the event that no selectors are provided.
-     */
-    public record SelectorOptions(IndexComponentSelector defaultSelector) implements Writeable {
-
-        public static final SelectorOptions ALL_APPLICABLE = new SelectorOptions(IndexComponentSelector.ALL_APPLICABLE);
-        public static final SelectorOptions DATA = new SelectorOptions(IndexComponentSelector.DATA);
-        public static final SelectorOptions FAILURES = new SelectorOptions(IndexComponentSelector.FAILURES);
-        /**
-         * Default instance. Uses ::data as the default selector if none are present in an index expression.
-         */
-        public static final SelectorOptions DEFAULT = DATA;
-
-        public static SelectorOptions read(StreamInput in) throws IOException {
-            if (in.getTransportVersion().before(TransportVersions.INTRODUCE_ALL_APPLICABLE_SELECTOR)) {
-                EnumSet set = in.readEnumSet(IndexComponentSelector.class);
-                if (set.isEmpty() || set.size() == 2) {
-                    assert set.contains(IndexComponentSelector.DATA) && set.contains(IndexComponentSelector.FAILURES)
-                        : "The enum set only supported ::data and ::failures";
-                    return SelectorOptions.ALL_APPLICABLE;
-                } else if (set.contains(IndexComponentSelector.DATA)) {
-                    return SelectorOptions.DATA;
-                } else {
-                    return SelectorOptions.FAILURES;
-                }
-            } else {
-                return new SelectorOptions(IndexComponentSelector.read(in));
-            }
-        }
-
-        @Override
-        public void writeTo(StreamOutput out) throws IOException {
-            if (out.getTransportVersion().before(TransportVersions.INTRODUCE_ALL_APPLICABLE_SELECTOR)) {
-                switch (defaultSelector) {
-                    case ALL_APPLICABLE -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES));
-                    case DATA -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.DATA));
-                    case FAILURES -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.FAILURES));
-                }
-            } else {
-                defaultSelector.writeTo(out);
-            }
-        }
-    }
-
     /**
      * This class is maintained for backwards compatibility and performance purposes. We use it for serialisation along with {@link Option}.
      */
@@ -497,7 +429,8 @@ private enum Option {
         ERROR_WHEN_CLOSED_INDICES,
         IGNORE_THROTTLED,

-        ALLOW_FAILURE_INDICES // Added in 8.14
+        ALLOW_FAILURE_INDICES, // Added in 8.14, Removed in 8.18
+        ALLOW_SELECTORS // Added in 8.18
     }

     private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(IndicesOptions.class);
@@ -510,8 +443,7 @@ private enum Option {
     public static final IndicesOptions DEFAULT = new IndicesOptions(
         ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS,
         WildcardOptions.DEFAULT,
-        GatekeeperOptions.DEFAULT,
-        SelectorOptions.DEFAULT
+        GatekeeperOptions.DEFAULT
     );

     public static final IndicesOptions STRICT_EXPAND_OPEN = IndicesOptions.builder()
@@ -528,10 +460,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.DATA)
         .build();
     public static final IndicesOptions STRICT_EXPAND_OPEN_FAILURE_STORE = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
@@ -547,10 +478,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.ALL_APPLICABLE)
         .build();
     public static final IndicesOptions LENIENT_EXPAND_OPEN = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS)
@@ -566,10 +496,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.DATA)
         .build();
     public static final IndicesOptions LENIENT_EXPAND_OPEN_NO_SELECTORS = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS)
@@ -585,7 +514,7 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(false)
+                .allowSelectors(false)
                 .ignoreThrottled(false)
         )
         .build();
@@ -603,10 +532,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.DATA)
         .build();
     public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS)
@@ -622,10 +550,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.DATA)
         .build();
     public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS)
@@ -636,10 +563,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.DATA)
         .build();
     public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTOR = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS)
@@ -650,7 +576,7 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(false)
+                .allowSelectors(false)
                 .ignoreThrottled(false)
         )
         .build();
@@ -668,10 +594,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.DATA)
         .build();
     public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
@@ -682,10 +607,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.DATA)
         .build();
     public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTORS = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
@@ -696,7 +620,7 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(false)
+                .allowSelectors(false)
                 .ignoreThrottled(false)
         )
         .build();
@@ -714,10 +638,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.ALL_APPLICABLE)
         .build();
     public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_FAILURE_STORE = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
@@ -728,10 +651,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.ALL_APPLICABLE)
         .build();
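The constants above no longer carry a default selector; the component choice now travels with each expression as an IndexComponentSelector, resolved from suffixes such as ::data or ::failures. A small sketch of that enum's role, using only the accessors already exercised earlier in this diff:

    import org.elasticsearch.action.support.IndexComponentSelector;

    class SelectorExample {
        // An expression like "logs-*::failures" resolves to FAILURES; a plain "logs-*" typically
        // resolves to DATA, the documented default when no selector is present.
        static boolean targetsOnlyFailureStore(IndexComponentSelector selector) {
            return selector.shouldIncludeFailures() && selector.shouldIncludeData() == false;
        }
    }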
     public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_FAILURE_STORE = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
@@ -747,10 +669,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.ALL_APPLICABLE)
         .build();
     public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
@@ -766,10 +687,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowClosedIndices(false)
                 .allowAliasToMultipleIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.DATA)
         .build();
     public static final IndicesOptions STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
@@ -785,10 +705,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowClosedIndices(false)
                 .allowAliasToMultipleIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.DATA)
         .build();
     public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
@@ -804,10 +723,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .ignoreThrottled(true)
                 .allowClosedIndices(false)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .allowAliasToMultipleIndices(true)
         )
-        .selectorOptions(SelectorOptions.DATA)
         .build();
     public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
@@ -823,10 +741,27 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowAliasToMultipleIndices(false)
                 .allowClosedIndices(false)
-                .allowFailureIndices(true)
+                .allowSelectors(false)
+                .ignoreThrottled(false)
+        )
+        .build();
+    public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED_ALLOW_SELECTORS = IndicesOptions.builder()
+        .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
+        .wildcardOptions(
+            WildcardOptions.builder()
+                .matchOpen(false)
+                .matchClosed(false)
+                .includeHidden(false)
+                .allowEmptyExpressions(true)
+                .resolveAliases(true)
+        )
+        .gatekeeperOptions(
+            GatekeeperOptions.builder()
+                .allowAliasToMultipleIndices(false)
+                .allowClosedIndices(false)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.DATA)
         .build();
     public static final IndicesOptions STRICT_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder()
         .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
@@ -842,10 +777,9 @@ private enum Option {
             GatekeeperOptions.builder()
                 .allowClosedIndices(false)
                 .allowAliasToMultipleIndices(true)
-                .allowFailureIndices(true)
+                .allowSelectors(true)
                 .ignoreThrottled(false)
         )
-        .selectorOptions(SelectorOptions.DATA)
         .build();

     /**
@@ -903,10 +837,10 @@ public boolean forbidClosedIndices() {
     }

     /**
-     * @return Whether execution on failure indices is allowed.
+     * @return Whether selectors (::) are allowed in the index expression.
      */
-    public boolean allowFailureIndices() {
-        return gatekeeperOptions.allowFailureIndices();
+    public boolean allowSelectors() {
+        return DataStream.isFailureStoreFeatureFlagEnabled() && gatekeeperOptions.allowSelectors();
     }

     /**
@@ -930,20 +864,6 @@ public boolean ignoreThrottled() {
         return gatekeeperOptions().ignoreThrottled();
     }

-    /**
-     * @return whether regular indices (stand-alone or backing indices) will be included in the response
-     */
-    public boolean includeRegularIndices() {
-        return selectorOptions().defaultSelector().shouldIncludeData();
-    }
-
-    /**
-     * @return whether failure indices (only supported by certain data streams) will be included in the response
-     */
-    public boolean includeFailureIndices() {
-        return selectorOptions().defaultSelector().shouldIncludeFailures();
-    }
-
     public void writeIndicesOptions(StreamOutput out) throws IOException {
         EnumSet