diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index c80cd99067743..6b4e238e6e0f8 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -48,7 +48,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.17", "8.11.5", "8.12.0", "8.13.0"] + BWC_VERSION: ["7.17.17", "8.12.1", "8.13.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 289139bee61b0..67c352f21d62b 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1793,8 +1793,8 @@ steps: env: BWC_VERSION: 8.11.4 - - label: "{{matrix.image}} / 8.11.5 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.5 + - label: "{{matrix.image}} / 8.12.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.0 timeout_in_minutes: 300 matrix: setup: @@ -1807,10 +1807,10 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.11.5 + BWC_VERSION: 8.12.0 - - label: "{{matrix.image}} / 8.12.0 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.0 + - label: "{{matrix.image}} / 8.12.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.1 timeout_in_minutes: 300 matrix: setup: @@ -1823,7 +1823,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.12.0 + BWC_VERSION: 8.12.1 - label: "{{matrix.image}} / 8.13.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.13.0 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 6e8dc5e5265b3..efd47fc74dd0d 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1102,8 +1102,8 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.11.4 - - label: 8.11.5 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.5#bwcTest + - label: 8.12.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.0#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -1111,9 +1111,9 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.11.5 - - label: 8.12.0 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.0#bwcTest + BWC_VERSION: 8.12.0 + - label: 8.12.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.1#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -1121,7 +1121,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.12.0 + BWC_VERSION: 8.12.1 - label: 8.13.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.13.0#bwcTest timeout_in_minutes: 300 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index f5c724dd4312c..97bce22156c6b 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -109,6 +109,6 @@ BWC_VERSION: - "8.11.2" - "8.11.3" - "8.11.4" - - "8.11.5" - "8.12.0" + - "8.12.1" - "8.13.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 9329a13bc7411..1d509c90d999b 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,4 @@ BWC_VERSION: - 
"7.17.17" - - "8.11.5" - - "8.12.0" + - "8.12.1" - "8.13.0" diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java index ef834fad424e3..8c5de05a01648 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.threadpool.ThreadPool; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; @@ -166,7 +167,7 @@ public void setUp() throws Exception { .build(); Settings settings = Settings.builder().put("node.name", ShardsAvailabilityHealthIndicatorBenchmark.class.getSimpleName()).build(); - ThreadPool threadPool = new ThreadPool(settings); + ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP); ClusterService clusterService = new ClusterService( Settings.EMPTY, diff --git a/branches.json b/branches.json index b33bb30e77cc4..289928f13daf7 100644 --- a/branches.json +++ b/branches.json @@ -7,9 +7,6 @@ { "branch": "8.12" }, - { - "branch": "8.11" - }, { "branch": "7.17" } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy index 144307912101c..237aa99e4b824 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy @@ -8,6 +8,8 @@ package org.elasticsearch.gradle.internal +import spock.lang.Ignore + import org.apache.commons.compress.archivers.tar.TarArchiveEntry import org.apache.commons.compress.archivers.tar.TarArchiveInputStream import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream @@ -36,6 +38,11 @@ class SymbolicLinkPreservingTarFuncTest extends AbstractGradleFuncTest { final Path linkToRealFolder = archiveSourceRoot.resolve("link-to-real-folder"); Files.createSymbolicLink(linkToRealFolder, Paths.get("./real-folder")); + final Path realFolder2 = testProjectDir.getRoot().toPath().resolve("real-folder2") + final Path realFolderSub = realFolder2.resolve("sub") + Files.createDirectory(realFolder2); + Files.createDirectory(realFolderSub); + buildFile << """ import org.elasticsearch.gradle.internal.SymbolicLinkPreservingTar @@ -56,6 +63,12 @@ tasks.register("buildBZip2Tar", SymbolicLinkPreservingTar) { SymbolicLinkPreserv tar.compression = Compression.BZIP2 tar.preserveFileTimestamps = ${preserverTimestamp} from fileTree("archiveRoot") + + into('config') { + dirMode 0750 + fileMode 0660 + from "real-folder2" + } } """ when: @@ -118,14 +131,20 @@ tasks.register("buildTar", SymbolicLinkPreservingTar) { SymbolicLinkPreservingTa if (entry.getName().equals("real-folder/")) { assert entry.isDirectory() realFolderEntry = true - } else if (entry.getName().equals("real-folder/file")) { + } else if 
(entry.getName().equals("real-folder/file")) { assert entry.isFile() fileEntry = true } else if (entry.getName().equals("real-folder/link-to-file")) { assert entry.isSymbolicLink() assert normalized(entry.getLinkName()) == "./file" linkToFileEntry = true - } else if (entry.getName().equals("link-in-folder/")) { + } else if (entry.getName().equals("config/")) { + assert entry.isDirectory() + assert entry.getMode() == 16877 + } else if (entry.getName().equals("config/sub/")) { + assert entry.isDirectory() + assert entry.getMode() == 16872 + }else if (entry.getName().equals("link-in-folder/")) { assert entry.isDirectory() linkInFolderEntry = true } else if (entry.getName().equals("link-in-folder/link-to-file")) { diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index a76f507079f2f..1e0b7de03b340 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -2,7 +2,7 @@ elasticsearch = 8.13.0 lucene = 9.9.1 bundled_jdk_vendor = openjdk -bundled_jdk = 21.0.1+12@415e3f918a1f4062a0074a2794853d0d +bundled_jdk = 21.0.2+13@f2283984656d49d69e91c558476027ac # optional dependencies spatial4j = 0.7 jts = 1.15.0 diff --git a/docs/changelog/100031.yaml b/docs/changelog/100031.yaml deleted file mode 100644 index 32aa51d2f9de6..0000000000000 --- a/docs/changelog/100031.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100031 -summary: Add executed pipelines to bulk api response -area: Indices APIs -type: enhancement -issues: [] diff --git a/docs/changelog/100033.yaml b/docs/changelog/100033.yaml deleted file mode 100644 index 92ef6cd289fdc..0000000000000 --- a/docs/changelog/100033.yaml +++ /dev/null @@ -1,9 +0,0 @@ -pr: 100033 -summary: "[Behavioral Analytics] Analytics collections use Data Stream Lifecycle (DSL)\ - \ instead of Index Lifecycle Management (ILM) for data retention management. Behavioral\ - \ analytics has traditionally used ILM to manage data retention. Starting with 8.12.0,\ - \ this will change. Analytics collections created prior to 8.12.0 will continue to use\ - \ their existing ILM policies, but new analytics collections will be managed using DSL." 
-area: Application -type: feature -issues: [ ] diff --git a/docs/changelog/100236.yaml b/docs/changelog/100236.yaml deleted file mode 100644 index b33825f9bc553..0000000000000 --- a/docs/changelog/100236.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100236 -summary: Record operation purpose for s3 stats collection -area: Distributed -type: enhancement -issues: [] diff --git a/docs/changelog/100287.yaml b/docs/changelog/100287.yaml deleted file mode 100644 index b92855a3342e2..0000000000000 --- a/docs/changelog/100287.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100287 -summary: Add an assertion to the testTransformFeatureReset test case -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/100316.yaml b/docs/changelog/100316.yaml deleted file mode 100644 index 9efb64a332dc1..0000000000000 --- a/docs/changelog/100316.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100316 -summary: Parallelize stale index deletion -area: Snapshot/Restore -type: enhancement -issues: - - 61513 diff --git a/docs/changelog/100333.yaml b/docs/changelog/100333.yaml deleted file mode 100644 index 96a2a62deffe5..0000000000000 --- a/docs/changelog/100333.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100333 -summary: Enable Universal Profiling as Enterprise feature -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/100368.yaml b/docs/changelog/100368.yaml deleted file mode 100644 index 2b9d8dc0b2044..0000000000000 --- a/docs/changelog/100368.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100368 -summary: "Status codes for Aggregation errors, part 2" -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/100383.yaml b/docs/changelog/100383.yaml deleted file mode 100644 index 6cda66149b2cc..0000000000000 --- a/docs/changelog/100383.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100383 -summary: Push s3 requests count via metrics API -area: Distributed -type: enhancement -issues: [] diff --git a/docs/changelog/100392.yaml b/docs/changelog/100392.yaml deleted file mode 100644 index ab693d5ae04ce..0000000000000 --- a/docs/changelog/100392.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100392 -summary: Prevent resource over-subscription in model allocation planner -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/100408.yaml b/docs/changelog/100408.yaml deleted file mode 100644 index 275c3b4a0de48..0000000000000 --- a/docs/changelog/100408.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100408 -summary: "ESQL: Make blocks ref counted" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/100466.yaml b/docs/changelog/100466.yaml deleted file mode 100644 index aaa30876ddfdf..0000000000000 --- a/docs/changelog/100466.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100466 -summary: "Introduce includeShardsStats in the stats request to indicate that we only fetch a summary" -area: Stats -type: enhancement -issues: [99744] diff --git a/docs/changelog/100492.yaml b/docs/changelog/100492.yaml deleted file mode 100644 index e0a1020b49488..0000000000000 --- a/docs/changelog/100492.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100492 -summary: Add runtime field of type `geo_shape` -area: Geo -type: enhancement -issues: - - 61299 diff --git a/docs/changelog/100519.yaml b/docs/changelog/100519.yaml deleted file mode 100644 index 086c6962b3a95..0000000000000 --- a/docs/changelog/100519.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100519 -summary: Disallow vectors whose magnitudes will not fit in a float -area: Vector Search -type: bug -issues: [] diff --git a/docs/changelog/100565.yaml 
b/docs/changelog/100565.yaml deleted file mode 100644 index 066e9bbb4b227..0000000000000 --- a/docs/changelog/100565.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100565 -summary: "[Monitoring] Dont get cluster state until recovery" -area: Monitoring -type: bug -issues: [] diff --git a/docs/changelog/100570.yaml b/docs/changelog/100570.yaml deleted file mode 100644 index b68a905b0e046..0000000000000 --- a/docs/changelog/100570.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100570 -summary: Added metric for cache eviction of entries with non zero frequency -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/100609.yaml b/docs/changelog/100609.yaml deleted file mode 100644 index c1c63c1af5d4d..0000000000000 --- a/docs/changelog/100609.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100609 -summary: Fix metric gauge creation model -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/100642.yaml b/docs/changelog/100642.yaml deleted file mode 100644 index 805a20174e11d..0000000000000 --- a/docs/changelog/100642.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100642 -summary: "ESQL: Alias duplicated aggregations in a stats" -area: ES|QL -type: enhancement -issues: - - 100544 diff --git a/docs/changelog/100646.yaml b/docs/changelog/100646.yaml deleted file mode 100644 index 63958ff18c4df..0000000000000 --- a/docs/changelog/100646.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100646 -summary: Support complex datemath expressions in index and index alias names -area: Search -type: bug -issues: [] diff --git a/docs/changelog/100776.yaml b/docs/changelog/100776.yaml deleted file mode 100644 index a0bde13f47c92..0000000000000 --- a/docs/changelog/100776.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100776 -summary: Health Report API should not return RED for unassigned cold/frozen shards - when data is available -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/100826.yaml b/docs/changelog/100826.yaml deleted file mode 100644 index 1b1729d1491ea..0000000000000 --- a/docs/changelog/100826.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 100826 -summary: Fix geo tile bounding boxes to be consistent with arithmetic method -area: Geo -type: bug -issues: - - 92611 - - 95574 diff --git a/docs/changelog/100828.yaml b/docs/changelog/100828.yaml deleted file mode 100644 index 6271a1cf2a0a9..0000000000000 --- a/docs/changelog/100828.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100828 -summary: Consider task cancelled exceptions as recoverable -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/100862.yaml b/docs/changelog/100862.yaml deleted file mode 100644 index ce9f119203d9d..0000000000000 --- a/docs/changelog/100862.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100862 -summary: Sending an index name to `DocumentParsingObserver` that is not ever null -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/100899.yaml b/docs/changelog/100899.yaml deleted file mode 100644 index 988546bb22cbe..0000000000000 --- a/docs/changelog/100899.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100899 -summary: Add methods for adding generation listeners with primary term -area: Store -type: enhancement -issues: [] diff --git a/docs/changelog/100921.yaml b/docs/changelog/100921.yaml deleted file mode 100644 index e6e2caa93d465..0000000000000 --- a/docs/changelog/100921.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100921 -summary: "Add support for Serbian Language Analyzer" -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/100938.yaml b/docs/changelog/100938.yaml deleted file mode 
100644 index b21f6955c992e..0000000000000 --- a/docs/changelog/100938.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100938 -summary: "Set includeShardsStats = false in NodesStatsRequest where the caller does not use shards-level statistics" -area: Stats -type: enhancement -issues: [] diff --git a/docs/changelog/100974.yaml b/docs/changelog/100974.yaml deleted file mode 100644 index e5d3a4ad3c9df..0000000000000 --- a/docs/changelog/100974.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100974 -summary: Create new cluster state API for querying features present on a cluster -area: "Infra/Core" -type: feature -issues: [] diff --git a/docs/changelog/100990.yaml b/docs/changelog/100990.yaml deleted file mode 100644 index 21b6fb93655cc..0000000000000 --- a/docs/changelog/100990.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100990 -summary: Add status code to `rest.suppressed` log output -area: "Infra/Logging" -type: enhancement -issues: [] diff --git a/docs/changelog/101024.yaml b/docs/changelog/101024.yaml deleted file mode 100644 index edbd3d834526c..0000000000000 --- a/docs/changelog/101024.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101024 -summary: More consistent logging messages for snapshot deletion -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101026.yaml b/docs/changelog/101026.yaml deleted file mode 100644 index cee85a722d7fa..0000000000000 --- a/docs/changelog/101026.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101026 -summary: Remove `auto_configure` privilege for profiling -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/101032.yaml b/docs/changelog/101032.yaml deleted file mode 100644 index 1c69e372704ce..0000000000000 --- a/docs/changelog/101032.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101032 -summary: Throw when wrapping rate agg in `DeferableBucketAggregator` -area: TSDB -type: bug -issues: [] diff --git a/docs/changelog/101050.yaml b/docs/changelog/101050.yaml deleted file mode 100644 index 1a68466e6e728..0000000000000 --- a/docs/changelog/101050.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101050 -summary: Ensure the correct `threadContext` for `RemoteClusterNodesAction` -area: Network -type: bug -issues: [] diff --git a/docs/changelog/101055.yaml b/docs/changelog/101055.yaml deleted file mode 100644 index e4ca4548c2ef6..0000000000000 --- a/docs/changelog/101055.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101055 -summary: Make tasks that calculate checkpoints time out -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/101057.yaml b/docs/changelog/101057.yaml deleted file mode 100644 index 2024c714f58b0..0000000000000 --- a/docs/changelog/101057.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101057 -summary: Add error logging for *QL -area: EQL -type: enhancement -issues: [] diff --git a/docs/changelog/101066.yaml b/docs/changelog/101066.yaml deleted file mode 100644 index 2fac601d65674..0000000000000 --- a/docs/changelog/101066.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101066 -summary: Log errors in `RestResponse` regardless of `error_trace` parameter -area: "Infra/Core" -type: enhancement -issues: - - 100884 diff --git a/docs/changelog/101093.yaml b/docs/changelog/101093.yaml deleted file mode 100644 index 99765170dd257..0000000000000 --- a/docs/changelog/101093.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101093 -summary: Make IPAddress writeable -area: Infra/Scripting -type: bug -issues: - - 101082 diff --git a/docs/changelog/101126.yaml b/docs/changelog/101126.yaml deleted file mode 100644 index 7a0f45891b171..0000000000000 --- 
a/docs/changelog/101126.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101126 -summary: Include totals in flamegraph response -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/101147.yaml b/docs/changelog/101147.yaml deleted file mode 100644 index cb556af35eead..0000000000000 --- a/docs/changelog/101147.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101147 -summary: Persist data counts on job close before results index refresh -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/101148.yaml b/docs/changelog/101148.yaml deleted file mode 100644 index eabe288e69e88..0000000000000 --- a/docs/changelog/101148.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101148 -summary: Add support for marking component templates as deprecated -area: Indices APIs -type: enhancement -issues: - - 100992 diff --git a/docs/changelog/101185.yaml b/docs/changelog/101185.yaml deleted file mode 100644 index 63d3a4da328b1..0000000000000 --- a/docs/changelog/101185.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101185 -summary: Repo analysis of uncontended register behaviour -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/101202.yaml b/docs/changelog/101202.yaml deleted file mode 100644 index 565338a2dbb6e..0000000000000 --- a/docs/changelog/101202.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101202 -summary: Optimize `MurmurHash3` -area: "Ingest Node" -type: enhancement -issues: [] diff --git a/docs/changelog/101230.yaml b/docs/changelog/101230.yaml deleted file mode 100644 index 3ed7eacb3fce0..0000000000000 --- a/docs/changelog/101230.yaml +++ /dev/null @@ -1,12 +0,0 @@ -pr: 101230 -summary: Enable query phase parallelism within a single shard -area: Search -type: enhancement -issues: - - 80693 -highlight: - title: Enable query phase parallelism within a single shard - body: |- - Activate inter-segment search concurrency by default in the query phase, in order to - enable parallelizing search execution across segments that a single shard is made of. - notable: true diff --git a/docs/changelog/101235.yaml b/docs/changelog/101235.yaml deleted file mode 100644 index 53adf9527c2c4..0000000000000 --- a/docs/changelog/101235.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101235 -summary: Load different way -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/101311.yaml b/docs/changelog/101311.yaml deleted file mode 100644 index e4786b937e060..0000000000000 --- a/docs/changelog/101311.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101311 -summary: Cache resolved index for mgets -area: CRUD -type: enhancement -issues: [] diff --git a/docs/changelog/101333.yaml b/docs/changelog/101333.yaml deleted file mode 100644 index 4452687b995d3..0000000000000 --- a/docs/changelog/101333.yaml +++ /dev/null @@ -1,29 +0,0 @@ -pr: 101333 -summary: Fixed JWT principal from claims -area: Authorization -type: breaking -issues: [] -breaking: - title: Fixed JWT principal from claims - area: Authorization - details: "This changes the format of a JWT's principal before the JWT is actually\ - \ validated by any JWT realm. The JWT's principal is a convenient way to refer\ - \ to a JWT that has not yet been verified by a JWT realm. The JWT's principal\ - \ is printed in the audit and regular logs (notably for auditing authn failures)\ - \ as well as the smart realm chain reordering optimization. The JWT principal\ - \ is NOT required to be identical to the JWT-authenticated user's principal, but\ - \ in general, they should be similar. 
Previously, the JWT's principal was built\ - \ by individual realms in the same way the realms built the authenticated user's\ - \ principal. This had the advantage that, in simpler JWT realms configurations\ - \ (e.g. a single JWT realm in the chain), the JWT principal and the authenticated\ - \ user's principal are very similar. However the drawback is that, in general,\ - \ the JWT principal and the user principal can be very different (i.e. in the\ - \ case where one JWT realm builds the JWT principal and a different one builds\ - \ the user principal). Another downside is that the (unauthenticated) JWT principal\ - \ depended on realm ordering, which makes identifying the JWT from its principal\ - \ dependent on the ES authn realm configuration. This PR implements a consistent\ - \ fixed logic to build the JWT principal, which only depends on the JWT's claims\ - \ and no ES configuration." - impact: "Users will observe changed format and values for the `user.name` attribute\ - \ of `authentication_failed` audit log events, in the JWT (failed) authn case." - notable: false diff --git a/docs/changelog/101346.yaml b/docs/changelog/101346.yaml deleted file mode 100644 index b32b123c506d1..0000000000000 --- a/docs/changelog/101346.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101346 -summary: Report full stack trace for non-state file settings transforms -area: Infra/Settings -type: bug -issues: [] diff --git a/docs/changelog/101383.yaml b/docs/changelog/101383.yaml deleted file mode 100644 index 4875403acfaeb..0000000000000 --- a/docs/changelog/101383.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101383 -summary: "ESQL: Track memory from values loaded from lucene" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/101385.yaml b/docs/changelog/101385.yaml deleted file mode 100644 index 406ed804cbbcc..0000000000000 --- a/docs/changelog/101385.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101385 -summary: "ESQL: Fix planning of MV_EXPAND with foldable expressions" -area: ES|QL -type: bug -issues: - - 101118 diff --git a/docs/changelog/101390.yaml b/docs/changelog/101390.yaml deleted file mode 100644 index 23bdef6e39dfe..0000000000000 --- a/docs/changelog/101390.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101390 -summary: Enable inter-segment concurrency for terms aggs -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/101392.yaml b/docs/changelog/101392.yaml deleted file mode 100644 index af79917245726..0000000000000 --- a/docs/changelog/101392.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101392 -summary: Include ML processor limits in `_ml/info` response -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/101396.yaml b/docs/changelog/101396.yaml deleted file mode 100644 index a486b2bed9237..0000000000000 --- a/docs/changelog/101396.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101396 -summary: "ESQL: Track blocks emitted from lucene" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/101409.yaml b/docs/changelog/101409.yaml deleted file mode 100644 index 82e7f339fdd89..0000000000000 --- a/docs/changelog/101409.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101409 -summary: Adding a simulate ingest api -area: Ingest Node -type: feature -issues: [] diff --git a/docs/changelog/101423.yaml b/docs/changelog/101423.yaml deleted file mode 100644 index a5497d444797f..0000000000000 --- a/docs/changelog/101423.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101423 -summary: Export circuit breaker trip count as a counter metric -area: Aggregations 
-type: enhancement -issues: [] diff --git a/docs/changelog/101426.yaml b/docs/changelog/101426.yaml deleted file mode 100644 index f9053ba1c1ec1..0000000000000 --- a/docs/changelog/101426.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101426 -summary: Add undesired shard count -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/101457.yaml b/docs/changelog/101457.yaml deleted file mode 100644 index 03bdbe39b5b8e..0000000000000 --- a/docs/changelog/101457.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 101457 -summary: "Remove Plugin.createComponents method in favour of overload with a PluginServices object" -area: Infra/Plugins -type: breaking-java -breaking: - area: "Java API" - title: "Plugin.createComponents method has been refactored to take a single PluginServices object" - details: > - Plugin.createComponents currently takes several different service arguments. The signature of this method changes - every time a new service is added. The method has now been modified to take a single interface object - that new services are added to. This will reduce API incompatibility issues when a new service - is introduced in the future. - impact: "Plugins that override createComponents will need to be refactored to override the new method on ES 8.12+" - notable: false diff --git a/docs/changelog/101474.yaml b/docs/changelog/101474.yaml deleted file mode 100644 index 2c013fe5d2537..0000000000000 --- a/docs/changelog/101474.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101474 -summary: "[Search Applications] Return 400 response when template rendering produces invalid JSON" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/101488.yaml b/docs/changelog/101488.yaml deleted file mode 100644 index 1db48a63f8542..0000000000000 --- a/docs/changelog/101488.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101488 -summary: "ESQL: More tracking in `BlockHash` impls" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/101518.yaml b/docs/changelog/101518.yaml deleted file mode 100644 index 53db542640348..0000000000000 --- a/docs/changelog/101518.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101518 -summary: Check that scripts produce correct json in render template action -area: Search -type: bug -issues: - - 101477 diff --git a/docs/changelog/101535.yaml b/docs/changelog/101535.yaml deleted file mode 100644 index 79ed78fa1d7a1..0000000000000 --- a/docs/changelog/101535.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101535 -summary: Disable inter-segment concurrency when sorting by field -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/101577.yaml b/docs/changelog/101577.yaml deleted file mode 100644 index e485fd3811cb6..0000000000000 --- a/docs/changelog/101577.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101577 -summary: Add metrics to the shared blob cache -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/101585.yaml b/docs/changelog/101585.yaml deleted file mode 100644 index 71815df1f48d9..0000000000000 --- a/docs/changelog/101585.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101585 -summary: Reroute on shard snapshot completion -area: Snapshot/Restore -type: bug -issues: - - 101514 diff --git a/docs/changelog/101607.yaml b/docs/changelog/101607.yaml deleted file mode 100644 index 18ee7f1bdc5cc..0000000000000 --- a/docs/changelog/101607.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101607 -summary: Log stacktrace together with log message in order to help debugging -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/101609.yaml 
b/docs/changelog/101609.yaml deleted file mode 100644 index 27993574743d2..0000000000000 --- a/docs/changelog/101609.yaml +++ /dev/null @@ -1,9 +0,0 @@ -pr: 101609 -summary: > - Add a node feature join barrier. This prevents nodes from joining clusters that do not have - all the features already present in the cluster. This ensures that once a features is supported - by all the nodes in a cluster, that feature will never then not be supported in the future. - This is the corresponding functionality for the version join barrier, but for features -area: "Cluster Coordination" -type: feature -issues: [] diff --git a/docs/changelog/101660.yaml b/docs/changelog/101660.yaml deleted file mode 100644 index cb3d3118d15a6..0000000000000 --- a/docs/changelog/101660.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101660 -summary: Fall through malformed JWTs to subsequent realms in the chain -area: Authentication -type: bug -issues: - - 101367 diff --git a/docs/changelog/101682.yaml b/docs/changelog/101682.yaml deleted file mode 100644 index e512006057581..0000000000000 --- a/docs/changelog/101682.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101682 -summary: "Add manage_enrich cluster privilege to kibana_system role" -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/101700.yaml b/docs/changelog/101700.yaml deleted file mode 100644 index 08671360688a7..0000000000000 --- a/docs/changelog/101700.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101700 -summary: Fix `lastUnsafeSegmentGenerationForGets` for realtime get -area: Engine -type: bug -issues: [] diff --git a/docs/changelog/101723.yaml b/docs/changelog/101723.yaml deleted file mode 100644 index 146d164805f00..0000000000000 --- a/docs/changelog/101723.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101723 -summary: Allowing non-dynamic index settings to be updated by automatically unassigning - shards -area: Indices APIs -type: enhancement -issues: [] diff --git a/docs/changelog/101727.yaml b/docs/changelog/101727.yaml deleted file mode 100644 index 24a7e1d5b4e48..0000000000000 --- a/docs/changelog/101727.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101727 -summary: Fix listeners in `SharedBlobCacheService.readMultiRegions` -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/101753.yaml b/docs/changelog/101753.yaml deleted file mode 100644 index 7b64075998430..0000000000000 --- a/docs/changelog/101753.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101753 -summary: Expose roles by default in cat allocation API -area: CAT APIs -type: enhancement -issues: [] diff --git a/docs/changelog/101788.yaml b/docs/changelog/101788.yaml deleted file mode 100644 index b7cc1e20663e8..0000000000000 --- a/docs/changelog/101788.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101788 -summary: "ESQL: Narrow catch in convert functions" -area: ES|QL -type: bug -issues: - - 100820 diff --git a/docs/changelog/101802.yaml b/docs/changelog/101802.yaml deleted file mode 100644 index 20e857c32f664..0000000000000 --- a/docs/changelog/101802.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101802 -summary: Correctly logging watcher history write failures -area: Watcher -type: bug -issues: [] diff --git a/docs/changelog/101815.yaml b/docs/changelog/101815.yaml deleted file mode 100644 index 511e23beb68ef..0000000000000 --- a/docs/changelog/101815.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101815 -summary: Run `TransportGetAliasesAction` on local node -area: Indices APIs -type: enhancement -issues: [] diff --git a/docs/changelog/101826.yaml b/docs/changelog/101826.yaml deleted file mode 100644 
index 87f3f8df1b0c2..0000000000000 --- a/docs/changelog/101826.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101826 -summary: Support keyed histograms -area: Aggregations -type: enhancement -issues: - - 100242 diff --git a/docs/changelog/101845.yaml b/docs/changelog/101845.yaml deleted file mode 100644 index 0dd95bdabca57..0000000000000 --- a/docs/changelog/101845.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101845 -summary: Introduce new endpoint to expose data stream lifecycle stats -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/101846.yaml b/docs/changelog/101846.yaml deleted file mode 100644 index 52dfff8801c62..0000000000000 --- a/docs/changelog/101846.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101846 -summary: Set `ActiveProcessorCount` when `node.processors` is set -area: Infra/CLI -type: enhancement -issues: [] diff --git a/docs/changelog/101847.yaml b/docs/changelog/101847.yaml deleted file mode 100644 index 91922b9e23ed0..0000000000000 --- a/docs/changelog/101847.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101847 -summary: Add an additional tiebreaker to RRF -area: Ranking -type: bug -issues: - - 101232 diff --git a/docs/changelog/101859.yaml b/docs/changelog/101859.yaml deleted file mode 100644 index 54f3fb12810ca..0000000000000 --- a/docs/changelog/101859.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101859 -summary: Cover head/tail commands edge cases and data types coverage -area: EQL -type: bug -issues: - - 101724 diff --git a/docs/changelog/101868.yaml b/docs/changelog/101868.yaml deleted file mode 100644 index d7cf650d25ed2..0000000000000 --- a/docs/changelog/101868.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101868 -summary: Read scores from downloaded vocabulary for XLM Roberta tokenizers -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/101904.yaml b/docs/changelog/101904.yaml deleted file mode 100644 index cad422cc52e15..0000000000000 --- a/docs/changelog/101904.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101904 -summary: Allow granting API keys with JWT as the access_token -area: Security -type: feature -issues: [] diff --git a/docs/changelog/101979.yaml b/docs/changelog/101979.yaml deleted file mode 100644 index ad119df24d36f..0000000000000 --- a/docs/changelog/101979.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101979 -summary: Calculate CO2 and emmission and costs -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/101989.yaml b/docs/changelog/101989.yaml deleted file mode 100644 index d294d194bd4e8..0000000000000 --- a/docs/changelog/101989.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101989 -summary: Add message field to `HealthPeriodicLogger` and `S3RequestRetryStats` -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/102020.yaml b/docs/changelog/102020.yaml deleted file mode 100644 index 7c74e9676d342..0000000000000 --- a/docs/changelog/102020.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102020 -summary: Retrieve stacktrace events from a custom index -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/102032.yaml b/docs/changelog/102032.yaml deleted file mode 100644 index 40463b9f252b9..0000000000000 --- a/docs/changelog/102032.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102032 -summary: Add vector_operation_count in profile output for knn searches -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/102048.yaml b/docs/changelog/102048.yaml deleted file mode 100644 index 54bc1d9eae52e..0000000000000 --- a/docs/changelog/102048.yaml +++ /dev/null @@ 
-1,5 +0,0 @@ -pr: 102048 -summary: "Repo analysis: verify empty register" -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/102051.yaml b/docs/changelog/102051.yaml deleted file mode 100644 index c3ca4a546928f..0000000000000 --- a/docs/changelog/102051.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102051 -summary: "Repo analysis: allow configuration of register ops" -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/102056.yaml b/docs/changelog/102056.yaml deleted file mode 100644 index 455f66ba90b03..0000000000000 --- a/docs/changelog/102056.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102056 -summary: Use `BulkRequest` to store Application Privileges -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/102065.yaml b/docs/changelog/102065.yaml deleted file mode 100644 index 1a9a219df4502..0000000000000 --- a/docs/changelog/102065.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102065 -summary: Add more desired balance stats -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/102075.yaml b/docs/changelog/102075.yaml deleted file mode 100644 index 54daae04169db..0000000000000 --- a/docs/changelog/102075.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102075 -summary: Accept a single or multiple inputs to `_inference` -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102089.yaml b/docs/changelog/102089.yaml deleted file mode 100644 index 9f33c0648d09f..0000000000000 --- a/docs/changelog/102089.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102089 -summary: Add prefix strings option to trained models -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102093.yaml b/docs/changelog/102093.yaml deleted file mode 100644 index f6922c0d36be6..0000000000000 --- a/docs/changelog/102093.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 102093 -summary: Add byte quantization for float vectors in HNSW -area: Vector Search -type: feature -issues: [] -highlight: - title: Add new `int8_hsnw` index type for int8 quantization for HNSW - body: |- - This commit adds a new index type called `int8_hnsw`. This index will - automatically quantized float32 values into int8 byte values. While - this increases disk usage by 25%, it reduces memory required for - fast HNSW search by 75%. Dramatically reducing the resource overhead - required for dense vector search. 
- notable: true diff --git a/docs/changelog/102138.yaml b/docs/changelog/102138.yaml deleted file mode 100644 index 3819e3201150e..0000000000000 --- a/docs/changelog/102138.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102138 -summary: Skip shards that don't match the source query during checkpointing -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/102140.yaml b/docs/changelog/102140.yaml deleted file mode 100644 index 0f086649b9710..0000000000000 --- a/docs/changelog/102140.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102140 -summary: Collect data tiers usage stats more efficiently -area: ILM+SLM -type: bug -issues: - - 100230 \ No newline at end of file diff --git a/docs/changelog/102165.yaml b/docs/changelog/102165.yaml deleted file mode 100644 index e1c4c76f1f6ff..0000000000000 --- a/docs/changelog/102165.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102165 -summary: Fix planning of duplicate aggs -area: ES|QL -type: bug -issues: - - 102083 diff --git a/docs/changelog/102172.yaml b/docs/changelog/102172.yaml deleted file mode 100644 index 485c2c4327e11..0000000000000 --- a/docs/changelog/102172.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102172 -summary: Adjust Histogram's bucket accounting to be iteratively -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/102177.yaml b/docs/changelog/102177.yaml deleted file mode 100644 index 62d7b11b86513..0000000000000 --- a/docs/changelog/102177.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102177 -summary: "GEO_POINT and CARTESIAN_POINT type support" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/102183.yaml b/docs/changelog/102183.yaml deleted file mode 100644 index 3daa1418ba5d0..0000000000000 --- a/docs/changelog/102183.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 102183 -summary: "[ES|QL] pow function always returns double" -area: ES|QL -type: "breaking" -issues: - - 99055 -breaking: - title: "[ES|QL] pow function always returns double" - area: REST API - details: "In ES|QL, the pow function no longer returns the type of its inputs, instead\ - \ always returning a double." - impact: low. Most queries should continue to function with the change. 
- notable: false diff --git a/docs/changelog/102184.yaml b/docs/changelog/102184.yaml deleted file mode 100644 index ba4d045b6b0aa..0000000000000 --- a/docs/changelog/102184.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102184 -summary: Track ESQL enrich memory -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102188.yaml b/docs/changelog/102188.yaml deleted file mode 100644 index 595a8395fab5c..0000000000000 --- a/docs/changelog/102188.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102188 -summary: Track blocks in `AsyncOperator` -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102190.yaml b/docs/changelog/102190.yaml deleted file mode 100644 index cd04e041fca5e..0000000000000 --- a/docs/changelog/102190.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102190 -summary: Track pages in ESQL enrich request/response -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102192.yaml b/docs/changelog/102192.yaml deleted file mode 100644 index 531aa943c9e36..0000000000000 --- a/docs/changelog/102192.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102192 -summary: "ESQL: Load more than one field at once" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102193.yaml b/docs/changelog/102193.yaml deleted file mode 100644 index 4d64493602ff2..0000000000000 --- a/docs/changelog/102193.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102193 -summary: Fix cache invalidation on privilege modification -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/102208.yaml b/docs/changelog/102208.yaml deleted file mode 100644 index b566a85753d82..0000000000000 --- a/docs/changelog/102208.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102208 -summary: Add static node settings to set default values for max merged segment sizes -area: Engine -type: enhancement -issues: [] diff --git a/docs/changelog/102244.yaml b/docs/changelog/102244.yaml deleted file mode 100644 index 3b160e033b57e..0000000000000 --- a/docs/changelog/102244.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102244 -summary: Expose reconciliation metrics via APM -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/102245.yaml b/docs/changelog/102245.yaml deleted file mode 100644 index 387540d96290c..0000000000000 --- a/docs/changelog/102245.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102245 -summary: Add non-green indicator names to `HealthPeriodicLogger` message -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/102248.yaml b/docs/changelog/102248.yaml deleted file mode 100644 index 854e8afde4086..0000000000000 --- a/docs/changelog/102248.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102248 -summary: Node stats as metrics -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/102273.yaml b/docs/changelog/102273.yaml deleted file mode 100644 index 78ecc8b2d2734..0000000000000 --- a/docs/changelog/102273.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102273 -summary: Improve analyzer reload log message -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/102292.yaml b/docs/changelog/102292.yaml deleted file mode 100644 index 953c3ffdf6150..0000000000000 --- a/docs/changelog/102292.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102292 -summary: Consider duplicate stacktraces in custom index -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/102317.yaml b/docs/changelog/102317.yaml deleted file mode 100644 index 89b2ae5432101..0000000000000 --- a/docs/changelog/102317.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102317 
-summary: "ESQL: Fix single value query" -area: ES|QL -type: bug -issues: - - 102298 diff --git a/docs/changelog/102350.yaml b/docs/changelog/102350.yaml deleted file mode 100644 index 00a311c5d99f8..0000000000000 --- a/docs/changelog/102350.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102350 -summary: "ESQL: Fix rare bug with empty string" -area: ES|QL -type: bug -issues: - - 101969 diff --git a/docs/changelog/102371.yaml b/docs/changelog/102371.yaml new file mode 100644 index 0000000000000..5a698bc9d671a --- /dev/null +++ b/docs/changelog/102371.yaml @@ -0,0 +1,5 @@ +pr: 102371 +summary: Adding threadpool metrics +area: Infra/Core +type: enhancement +issues: [] diff --git a/docs/changelog/102379.yaml b/docs/changelog/102379.yaml deleted file mode 100644 index 0773b137779a5..0000000000000 --- a/docs/changelog/102379.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102379 -summary: Pass source query to `_field_caps` (as `index_filter`) when deducing destination index mappings for better - performance -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/102388.yaml b/docs/changelog/102388.yaml deleted file mode 100644 index 3e65e46949bda..0000000000000 --- a/docs/changelog/102388.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102388 -summary: Add support for `index_filter` to open pit -area: Search -type: enhancement -issues: - - 99740 diff --git a/docs/changelog/102391.yaml b/docs/changelog/102391.yaml deleted file mode 100644 index 5fcbb9e6d2858..0000000000000 --- a/docs/changelog/102391.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102391 -summary: "ESQL: Support the `_source` metadata field" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102417.yaml b/docs/changelog/102417.yaml deleted file mode 100644 index 09c1a4f49dbfd..0000000000000 --- a/docs/changelog/102417.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102417 -summary: "ESQL: emit warnings from single-value functions processing multi-values" -area: ES|QL -type: feature -issues: - - 98743 diff --git a/docs/changelog/102426.yaml b/docs/changelog/102426.yaml deleted file mode 100644 index 3aad50ed1eee0..0000000000000 --- a/docs/changelog/102426.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102426 -summary: Patterns support for allowed subjects by the JWT realm -area: Authentication -type: feature -issues: [] diff --git a/docs/changelog/102428.yaml b/docs/changelog/102428.yaml new file mode 100644 index 0000000000000..275492fa6a888 --- /dev/null +++ b/docs/changelog/102428.yaml @@ -0,0 +1,5 @@ +pr: 102428 +summary: "ESQL: Add option to drop null fields" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102434.yaml b/docs/changelog/102434.yaml deleted file mode 100644 index ab6aa886c13b1..0000000000000 --- a/docs/changelog/102434.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102434 -summary: "ESQL: Short circuit loading empty doc values" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102447.yaml b/docs/changelog/102447.yaml deleted file mode 100644 index 76823153670bd..0000000000000 --- a/docs/changelog/102447.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102447 -summary: Pass transform source query as `index_filter` to `open_point_in_time` request -area: Transform -type: enhancement -issues: - - 101049 diff --git a/docs/changelog/102456.yaml b/docs/changelog/102456.yaml deleted file mode 100644 index 6ef3b8f16f53c..0000000000000 --- a/docs/changelog/102456.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102456 -summary: Switch logs data streams to search all fields by default -area: Data streams 
-type: enhancement -issues: - - 99872 diff --git a/docs/changelog/102461.yaml b/docs/changelog/102461.yaml deleted file mode 100644 index c0c07554ed21f..0000000000000 --- a/docs/changelog/102461.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102461 -summary: Enable concurrency for scripted metric agg -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/102462.yaml b/docs/changelog/102462.yaml deleted file mode 100644 index d44ccc4cbbc5c..0000000000000 --- a/docs/changelog/102462.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102462 -summary: Check the real memory circuit breaker when building global ordinals -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/102472.yaml b/docs/changelog/102472.yaml deleted file mode 100644 index b0f5bfc714643..0000000000000 --- a/docs/changelog/102472.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102472 -summary: Expose the `invalidation` field in Get/Query `ApiKey` APIs -area: Security -type: enhancement -issues: [ ] diff --git a/docs/changelog/102476.yaml b/docs/changelog/102476.yaml deleted file mode 100644 index a53a20ecfec20..0000000000000 --- a/docs/changelog/102476.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102476 -summary: Unwrap `ExecutionException` when loading from cache in `AbstractIndexOrdinalsFieldData` -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/102490.yaml b/docs/changelog/102490.yaml deleted file mode 100644 index 8ff554ab0f0fe..0000000000000 --- a/docs/changelog/102490.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102490 -summary: "ESQL: Load text field from parent keyword field" -area: ES|QL -type: enhancement -issues: - - 102473 diff --git a/docs/changelog/102495.yaml b/docs/changelog/102495.yaml deleted file mode 100644 index 77ae42f7eebcb..0000000000000 --- a/docs/changelog/102495.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102495 -summary: "Add support for configuring proxy scheme in S3 client settings and EC2 discovery plugin" -area: Distributed -type: enhancement -issues: - - 101873 diff --git a/docs/changelog/102510.yaml b/docs/changelog/102510.yaml deleted file mode 100644 index 2b654b5c85929..0000000000000 --- a/docs/changelog/102510.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 102510 -summary: "ESQL: Make fieldcaps calls lighter" -area: ES|QL -type: enhancement -issues: - - 101763 - - 102393 diff --git a/docs/changelog/102511.yaml b/docs/changelog/102511.yaml deleted file mode 100644 index cf80ca03e197f..0000000000000 --- a/docs/changelog/102511.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102511 -summary: Trigger parent circuit breaker when building scorers in filters aggregation -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/102512.yaml b/docs/changelog/102512.yaml deleted file mode 100644 index d4bc765ecaf5f..0000000000000 --- a/docs/changelog/102512.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102512 -summary: Implement exponential backoff for transform state persistence retrying -area: Transform -type: enhancement -issues: - - 102528 diff --git a/docs/changelog/102562.yaml b/docs/changelog/102562.yaml deleted file mode 100644 index a4b4f5a095118..0000000000000 --- a/docs/changelog/102562.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102562 -summary: Track blocks of intermediate state of aggs -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102570.yaml b/docs/changelog/102570.yaml deleted file mode 100644 index 2d3f878dbbb27..0000000000000 --- a/docs/changelog/102570.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102570 -summary: Added 
`beat.stats.libbeat.pipeline.queue.max_events` -area: Monitoring -type: enhancement -issues: [] diff --git a/docs/changelog/102571.yaml b/docs/changelog/102571.yaml deleted file mode 100644 index 25272408161db..0000000000000 --- a/docs/changelog/102571.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102571 -summary: Allow executing multiple periodic flushes while they are being made durable -area: Store -type: enhancement -issues: [] diff --git a/docs/changelog/102598.yaml b/docs/changelog/102598.yaml deleted file mode 100644 index c32519acdf6d1..0000000000000 --- a/docs/changelog/102598.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102598 -summary: Add apm api for asynchronous counters (always increasing) -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/102602.yaml b/docs/changelog/102602.yaml deleted file mode 100644 index dd01eaa98b214..0000000000000 --- a/docs/changelog/102602.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102602 -summary: Consider search context missing exceptions as recoverable -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/102612.yaml b/docs/changelog/102612.yaml deleted file mode 100644 index 60808ae72801a..0000000000000 --- a/docs/changelog/102612.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102612 -summary: Track blocks when hashing single multi-valued field -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102636.yaml b/docs/changelog/102636.yaml deleted file mode 100644 index 8b32e0568b0fb..0000000000000 --- a/docs/changelog/102636.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102636 -summary: Revert non-semantic `NodeInfo` -area: Infra/Core -type: regression -issues: [] diff --git a/docs/changelog/102637.yaml b/docs/changelog/102637.yaml deleted file mode 100644 index 4d5d689934bd6..0000000000000 --- a/docs/changelog/102637.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102637 -summary: Improve stability of spike and dip detection for the change point aggregation -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102644.yaml b/docs/changelog/102644.yaml deleted file mode 100644 index 17c5cbebed7cc..0000000000000 --- a/docs/changelog/102644.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102644 -summary: Disable parallelism for composite agg against high cardinality fields -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/102673.yaml b/docs/changelog/102673.yaml deleted file mode 100644 index 16546edb3cf3c..0000000000000 --- a/docs/changelog/102673.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102673 -summary: "ESQL: Share constant null Blocks" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102680.yaml b/docs/changelog/102680.yaml deleted file mode 100644 index 8b32c5029ea2a..0000000000000 --- a/docs/changelog/102680.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102680 -summary: Make `api_key.delete.interval` a dynamic setting -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/102682.yaml b/docs/changelog/102682.yaml deleted file mode 100644 index 190ff3df5a7f6..0000000000000 --- a/docs/changelog/102682.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102682 -summary: Introduce fielddata cache ttl -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/102710.yaml b/docs/changelog/102710.yaml deleted file mode 100644 index ee805c70180a0..0000000000000 --- a/docs/changelog/102710.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102710 -summary: Enable concurrency for multi terms agg -area: Aggregations -type: enhancement -issues: [] diff 
--git a/docs/changelog/102713.yaml b/docs/changelog/102713.yaml deleted file mode 100644 index 278d7d4ffb129..0000000000000 --- a/docs/changelog/102713.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102713 -summary: "ESQL: Add `profile` option" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102727.yaml b/docs/changelog/102727.yaml deleted file mode 100644 index 4f4d4fbf48899..0000000000000 --- a/docs/changelog/102727.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102727 -summary: "ESQL: Load stored fields sequentially" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102731.yaml b/docs/changelog/102731.yaml deleted file mode 100644 index a12e04bfab078..0000000000000 --- a/docs/changelog/102731.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102731 -summary: Add internal inference action for ml models an services -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102735.yaml b/docs/changelog/102735.yaml deleted file mode 100644 index 4726e08d1f314..0000000000000 --- a/docs/changelog/102735.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102735 -summary: "[Profiling] Report in status API if docs exist" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/102740.yaml b/docs/changelog/102740.yaml deleted file mode 100644 index b7fc10eb19ddb..0000000000000 --- a/docs/changelog/102740.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102740 -summary: "[Profiling] Notify early about task cancellation" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/102767.yaml b/docs/changelog/102767.yaml deleted file mode 100644 index cf1edeeb51265..0000000000000 --- a/docs/changelog/102767.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102767 -summary: "ESQL: remove `time_zone` request parameter" -area: ES|QL -type: bug -issues: - - 102159 diff --git a/docs/changelog/102806.yaml b/docs/changelog/102806.yaml deleted file mode 100644 index faa971ec1d879..0000000000000 --- a/docs/changelog/102806.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102806 -summary: Support for GET all models and by task type in the `_inference` API -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102808.yaml b/docs/changelog/102808.yaml deleted file mode 100644 index 4e3df80a28319..0000000000000 --- a/docs/changelog/102808.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102808 -summary: Active shards message corrected for search shards -area: Distributed -type: bug -issues: - - 101896 diff --git a/docs/changelog/102810.yaml b/docs/changelog/102810.yaml deleted file mode 100644 index f5faf7a321dbc..0000000000000 --- a/docs/changelog/102810.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102810 -summary: Add memory utilization Kibana metric to the monitoring index templates -area: Monitoring -type: enhancement -issues: [] diff --git a/docs/changelog/102811.yaml b/docs/changelog/102811.yaml deleted file mode 100644 index 039a337a53e87..0000000000000 --- a/docs/changelog/102811.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102811 -summary: Split comma-separated source index strings into separate indices -area: Transform -type: bug -issues: - - 99564 diff --git a/docs/changelog/102832.yaml b/docs/changelog/102832.yaml deleted file mode 100644 index 7daf22263b2e9..0000000000000 --- a/docs/changelog/102832.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102832 -summary: Disable concurrency for sampler and diversified sampler -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/102840.yaml b/docs/changelog/102840.yaml deleted file mode 
100644 index 1d87cede632c9..0000000000000 --- a/docs/changelog/102840.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102840 -summary: Fail S3 repository analysis on partial reads -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/102844.yaml b/docs/changelog/102844.yaml deleted file mode 100644 index d05547c3aa9da..0000000000000 --- a/docs/changelog/102844.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102844 -summary: Skip global ordinals loading if query does not match after rewrite -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/102877.yaml b/docs/changelog/102877.yaml deleted file mode 100644 index da2de19b19a90..0000000000000 --- a/docs/changelog/102877.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102877 -summary: Add basic telelemetry for the inference feature -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102888.yaml b/docs/changelog/102888.yaml deleted file mode 100644 index 79ea9cbe712de..0000000000000 --- a/docs/changelog/102888.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102888 -summary: "Optimize `_count` type API requests" -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/102901.yaml b/docs/changelog/102901.yaml deleted file mode 100644 index ac417691b525c..0000000000000 --- a/docs/changelog/102901.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102901 -summary: Introduce local block factory -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102902.yaml b/docs/changelog/102902.yaml deleted file mode 100644 index b33afdd35a603..0000000000000 --- a/docs/changelog/102902.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102902 -summary: Fast path for reading single doc with ordinals -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102906.yaml b/docs/changelog/102906.yaml deleted file mode 100644 index 3efaa2db58390..0000000000000 --- a/docs/changelog/102906.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102906 -summary: Introduce a `StreamOutput` that counts how many bytes are written to the - stream -area: Distributed -type: enhancement -issues: [] diff --git a/docs/changelog/102916.yaml b/docs/changelog/102916.yaml deleted file mode 100644 index 3943f34d91221..0000000000000 --- a/docs/changelog/102916.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102916 -summary: Fix layout for MV_EXPAND -area: ES|QL -type: bug -issues: - - 102912 diff --git a/docs/changelog/102919.yaml b/docs/changelog/102919.yaml deleted file mode 100644 index 0de2e75abc6cf..0000000000000 --- a/docs/changelog/102919.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102919 -summary: Error log when license verification fails locally -area: License -type: bug -issues: [] diff --git a/docs/changelog/102925.yaml b/docs/changelog/102925.yaml deleted file mode 100644 index 5dd15f4f60429..0000000000000 --- a/docs/changelog/102925.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102925 -summary: Add ldap user metadata mappings for full name and email -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/102937.yaml b/docs/changelog/102937.yaml deleted file mode 100644 index 116fbadebe09d..0000000000000 --- a/docs/changelog/102937.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102937 -summary: "ESQL: New telemetry commands" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102944.yaml b/docs/changelog/102944.yaml deleted file mode 100644 index 58a1bb8f6bbaa..0000000000000 --- a/docs/changelog/102944.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102944 -summary: "If trained model download task is in 
progress, wait for it to finish before\ - \ executing start trained model deployment" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/102967.yaml b/docs/changelog/102967.yaml deleted file mode 100644 index cdde735f6c077..0000000000000 --- a/docs/changelog/102967.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102967 -summary: "ES|QL: Improve resolution error management in `mv_expand`" -area: ES|QL -type: bug -issues: - - 102964 diff --git a/docs/changelog/102994.yaml b/docs/changelog/102994.yaml deleted file mode 100644 index c35baaefcb723..0000000000000 --- a/docs/changelog/102994.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102994 -summary: Enable Connectors API as technical preview -area: Application -type: feature -issues: [] diff --git a/docs/changelog/103013.yaml b/docs/changelog/103013.yaml deleted file mode 100644 index bb8eb99088856..0000000000000 --- a/docs/changelog/103013.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103013 -summary: Deprecate the unused `elasticsearch_version` field of enrich policy json -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/103024.yaml b/docs/changelog/103024.yaml deleted file mode 100644 index e860ad056f980..0000000000000 --- a/docs/changelog/103024.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103024 -summary: Fix template simulate setting application ordering -area: Indices APIs -type: bug -issues: - - 103008 diff --git a/docs/changelog/103061.yaml b/docs/changelog/103061.yaml deleted file mode 100644 index 558429493ac6f..0000000000000 --- a/docs/changelog/103061.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103061 -summary: "[Profiling] Query in parallel only if beneficial" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/103116.yaml b/docs/changelog/103116.yaml deleted file mode 100644 index 402c83e16ec37..0000000000000 --- a/docs/changelog/103116.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103116 -summary: Fix `frequent_item_sets` aggregation on empty index -area: Machine Learning -type: bug -issues: - - 103067 diff --git a/docs/changelog/103124.yaml b/docs/changelog/103124.yaml deleted file mode 100644 index 078c8249bbf5d..0000000000000 --- a/docs/changelog/103124.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103124 -summary: Start a new trace context before loading a trained model -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/103134.yaml b/docs/changelog/103134.yaml new file mode 100644 index 0000000000000..13bb0323645f5 --- /dev/null +++ b/docs/changelog/103134.yaml @@ -0,0 +1,5 @@ +pr: 103134 +summary: CCS with `minimize_roundtrips` performs incremental merges of each `SearchResponse` +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/103150.yaml b/docs/changelog/103150.yaml deleted file mode 100644 index 3f42c882d89fb..0000000000000 --- a/docs/changelog/103150.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103150 -summary: "ES|QL: Fix NPE on single value detection" -area: ES|QL -type: bug -issues: - - 103141 diff --git a/docs/changelog/103183.yaml b/docs/changelog/103183.yaml deleted file mode 100644 index cb28033cff6a7..0000000000000 --- a/docs/changelog/103183.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103183 -summary: "[Connectors API] Handle nullable fields correctly in the `ConnectorSyncJob`\ - \ parser" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/103185.yaml b/docs/changelog/103185.yaml deleted file mode 100644 index 3a1a4960ba98c..0000000000000 --- a/docs/changelog/103185.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103185 -summary: 
Fix format string in `OldLuceneVersions` -area: Search -type: bug -issues: [] diff --git a/docs/changelog/103203.yaml b/docs/changelog/103203.yaml deleted file mode 100644 index d2aa3e9961c6a..0000000000000 --- a/docs/changelog/103203.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103203 -summary: Fix NPE & empty result handling in `CountOnlyQueryPhaseResultConsumer` -area: Search -type: bug -issues: [] diff --git a/docs/changelog/103209.yaml b/docs/changelog/103209.yaml deleted file mode 100644 index 05ae8c13bcb5c..0000000000000 --- a/docs/changelog/103209.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103209 -summary: "ESQL: Fix `to_degrees()` returning infinity" -area: ES|QL -type: bug -issues: - - 102987 diff --git a/docs/changelog/103212.yaml b/docs/changelog/103212.yaml deleted file mode 100644 index 3cbbddc8f2229..0000000000000 --- a/docs/changelog/103212.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103212 -summary: Use the eql query filter for the open-pit request -area: EQL -type: enhancement -issues: [] diff --git a/docs/changelog/103251.yaml b/docs/changelog/103251.yaml deleted file mode 100644 index 0c5c6d6e4d776..0000000000000 --- a/docs/changelog/103251.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103251 -summary: Wait for reroute before acking put-shutdown -area: Infra/Node Lifecycle -type: bug -issues: [] diff --git a/docs/changelog/103339.yaml b/docs/changelog/103339.yaml deleted file mode 100644 index 6ea1ab0cf799a..0000000000000 --- a/docs/changelog/103339.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103339 -summary: "ESQL: Fix resolution of MV_EXPAND after KEEP *" -area: ES|QL -type: bug -issues: - - 103331 diff --git a/docs/changelog/103342.yaml b/docs/changelog/103342.yaml deleted file mode 100644 index 32711d7a6b390..0000000000000 --- a/docs/changelog/103342.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103342 -summary: Use dataset size instead of on-disk size for data stream stats -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/103361.yaml b/docs/changelog/103361.yaml deleted file mode 100644 index 441acc09895ef..0000000000000 --- a/docs/changelog/103361.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103361 -summary: Prevent attempts to access non-existent node information during rebalancing -area: Machine Learning -type: bug -issues: [ ] diff --git a/docs/changelog/103399.yaml b/docs/changelog/103399.yaml new file mode 100644 index 0000000000000..440ac90b313f5 --- /dev/null +++ b/docs/changelog/103399.yaml @@ -0,0 +1,6 @@ +pr: 103399 +summary: "Add validation on the _id field when upserting a new doc" +area: Search +type: bug +issues: + - 102981 diff --git a/docs/changelog/103408.yaml b/docs/changelog/103408.yaml deleted file mode 100644 index bf5081b854f08..0000000000000 --- a/docs/changelog/103408.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103408 -summary: Cache component versions -area: Infra/Core -type: bug -issues: - - 102103 diff --git a/docs/changelog/103427.yaml b/docs/changelog/103427.yaml deleted file mode 100644 index 57a27aa687ab7..0000000000000 --- a/docs/changelog/103427.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103427 -summary: "[Connector API] Fix bug with nullable tooltip field in parser" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/103430.yaml b/docs/changelog/103430.yaml deleted file mode 100644 index cd2444270849d..0000000000000 --- a/docs/changelog/103430.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103430 -summary: "[Connectors API] Fix bug with missing TEXT `DisplayType` enum" -area: Application -type: bug -issues: [] diff --git
a/docs/changelog/103435.yaml b/docs/changelog/103435.yaml deleted file mode 100644 index 95e3c7169ada9..0000000000000 --- a/docs/changelog/103435.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103435 -summary: Dispatch `ClusterStateAction#buildResponse` to executor -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/103508.yaml b/docs/changelog/103508.yaml deleted file mode 100644 index 9c6f79ef75657..0000000000000 --- a/docs/changelog/103508.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103508 -summary: "[Connectors API] Fix `ClassCastException` when creating a new sync job" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/103530.yaml b/docs/changelog/103530.yaml deleted file mode 100644 index 6feb04467b03e..0000000000000 --- a/docs/changelog/103530.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103530 -summary: Exclude quantiles when fetching model snapshots where possible -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/103546.yaml b/docs/changelog/103546.yaml deleted file mode 100644 index 08584e8555bd4..0000000000000 --- a/docs/changelog/103546.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103546 -summary: Handle timeout on standalone rewrite calls -area: Search -type: bug -issues: [] diff --git a/docs/changelog/103574.yaml b/docs/changelog/103574.yaml deleted file mode 100644 index ed6ad237f49a2..0000000000000 --- a/docs/changelog/103574.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103574 -summary: Samples should check if the aggregations result is empty or null -area: EQL -type: bug -issues: [] diff --git a/docs/changelog/103580.yaml b/docs/changelog/103580.yaml deleted file mode 100644 index 6fd0328017d1f..0000000000000 --- a/docs/changelog/103580.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103580 -summary: Copy counter field properties to downsampled index -area: Downsampling -type: bug -issues: - - 103569 diff --git a/docs/changelog/103591.yaml b/docs/changelog/103591.yaml deleted file mode 100644 index 41b6e362c5713..0000000000000 --- a/docs/changelog/103591.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103591 -summary: Wait for the model results on graceful shutdown -area: Machine Learning -type: bug -issues: - - 103414 diff --git a/docs/changelog/103601.yaml b/docs/changelog/103601.yaml deleted file mode 100644 index bf7aaaf835e00..0000000000000 --- a/docs/changelog/103601.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 103601 -summary: Introduce Elasticsearch `PostingFormat` based on Lucene 90 positing format - using PFOR -area: Search -type: bug -issues: - - 103002 diff --git a/docs/changelog/103615.yaml b/docs/changelog/103615.yaml deleted file mode 100644 index 69498c749687f..0000000000000 --- a/docs/changelog/103615.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103615 -summary: Fix downsample api by returning a failure in case one or more downsample persistent tasks failed -area: Downsampling -type: bug -issues: [] diff --git a/docs/changelog/103670.yaml b/docs/changelog/103670.yaml deleted file mode 100644 index ad3f0519b5d19..0000000000000 --- a/docs/changelog/103670.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103670 -summary: "ESQL: Improve local folding of aggregates" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/103690.yaml b/docs/changelog/103690.yaml deleted file mode 100644 index fa9076789c1cd..0000000000000 --- a/docs/changelog/103690.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103690 -summary: Restore inter-segment search concurrency with synthetic source is enabled -area: Search -type: bug -issues: [] diff --git a/docs/changelog/103763.yaml 
b/docs/changelog/103763.yaml new file mode 100644 index 0000000000000..e4d6556c77077 --- /dev/null +++ b/docs/changelog/103763.yaml @@ -0,0 +1,6 @@ +pr: 103763 +summary: Ref count search response bytes +area: Search +type: enhancement +issues: + - 102657 diff --git a/docs/changelog/103873.yaml b/docs/changelog/103873.yaml deleted file mode 100644 index 937106043ecf4..0000000000000 --- a/docs/changelog/103873.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103873 -summary: Catch exceptions during `pytorch_inference` startup -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/103923.yaml b/docs/changelog/103923.yaml deleted file mode 100644 index 80e6880909f3a..0000000000000 --- a/docs/changelog/103923.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103923 -summary: Preserve response headers in Datafeed preview -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/104006.yaml b/docs/changelog/104006.yaml new file mode 100644 index 0000000000000..d840502cdefbe --- /dev/null +++ b/docs/changelog/104006.yaml @@ -0,0 +1,5 @@ +pr: 104006 +summary: Add support for more than one `inner_hit` when searching nested vectors +area: Vector Search +type: enhancement +issues: [] diff --git a/docs/changelog/104029.yaml b/docs/changelog/104029.yaml deleted file mode 100644 index 2b74d3b634dba..0000000000000 --- a/docs/changelog/104029.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104029 -summary: '`AsyncOperator#isFinished` must never return true on failure' -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/104033.yaml b/docs/changelog/104033.yaml new file mode 100644 index 0000000000000..d3e167665732c --- /dev/null +++ b/docs/changelog/104033.yaml @@ -0,0 +1,5 @@ +pr: 104033 +summary: Add Query Users API +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/104046.yaml b/docs/changelog/104046.yaml deleted file mode 100644 index 9b383611b560a..0000000000000 --- a/docs/changelog/104046.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104046 -summary: "ESQL: Update the use of some user-caused exceptions" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/104051.yaml b/docs/changelog/104051.yaml deleted file mode 100644 index 1aa6d69f5ae20..0000000000000 --- a/docs/changelog/104051.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104051 -summary: Fix NPE that is thrown by `_update` API -area: Transform -type: bug -issues: - - 104048 diff --git a/docs/changelog/104087.yaml b/docs/changelog/104087.yaml new file mode 100644 index 0000000000000..614e2d0de7e58 --- /dev/null +++ b/docs/changelog/104087.yaml @@ -0,0 +1,13 @@ +pr: 104087 +summary: Deprecate machine learning on Intel macOS +area: Machine Learning +type: deprecation +issues: [] +deprecation: + title: Deprecate machine learning on Intel macOS + area: Packaging + details: The machine learning plugin will be permanently disabled on macOS x86_64 + in new minor versions released from December 2024 onwards. + impact: To continue to use machine learning functionality on macOS please switch to + an arm64 machine (Apple silicon). Alternatively, it will still be possible to run + Elasticsearch with machine learning enabled in a Docker container on macOS x86_64. 
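For readers who want to keep exercising ML on an Intel Mac, the Docker route mentioned in the deprecation note above is simple to try. A minimal sketch, assuming Docker Desktop and the official 8.12.0 image (the container name is invented, and `xpack.ml.enabled` already defaults to `true`; it is spelled out here only to make the intent explicit):

[source,sh]
----
# On macOS x86_64, Docker runs the linux/amd64 image, where the
# machine learning plugin remains fully supported.
docker pull docker.elastic.co/elasticsearch/elasticsearch:8.12.0

# Start a single-node cluster with machine learning enabled.
docker run --name es-ml-test -p 9200:9200 \
  -e "discovery.type=single-node" \
  -e "xpack.ml.enabled=true" \
  docker.elastic.co/elasticsearch/elasticsearch:8.12.0
----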
diff --git a/docs/changelog/104182.yaml b/docs/changelog/104182.yaml new file mode 100644 index 0000000000000..b5cf10f941cc6 --- /dev/null +++ b/docs/changelog/104182.yaml @@ -0,0 +1,5 @@ +pr: 104182 +summary: "Apm-data: fix `@custom` component templates" +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/104269.yaml b/docs/changelog/104269.yaml new file mode 100644 index 0000000000000..8d4b0fc5d5198 --- /dev/null +++ b/docs/changelog/104269.yaml @@ -0,0 +1,5 @@ +pr: 104269 +summary: "ESQL: Support loading shapes from source into WKB blocks" +area: "ES|QL" +type: enhancement +issues: [] diff --git a/docs/changelog/104355.yaml b/docs/changelog/104355.yaml new file mode 100644 index 0000000000000..2a100faf3c35f --- /dev/null +++ b/docs/changelog/104355.yaml @@ -0,0 +1,5 @@ +pr: 104355 +summary: Prepare enrich plan to support multi clusters +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/104386.yaml b/docs/changelog/104386.yaml new file mode 100644 index 0000000000000..41b6a17424bbd --- /dev/null +++ b/docs/changelog/104386.yaml @@ -0,0 +1,6 @@ +pr: 104386 +summary: "X-pack/plugin/apm-data: add dynamic setting for enabling template registry" +area: Data streams +type: enhancement +issues: + - 104385 diff --git a/docs/changelog/104387.yaml b/docs/changelog/104387.yaml new file mode 100644 index 0000000000000..f10084d8c4b32 --- /dev/null +++ b/docs/changelog/104387.yaml @@ -0,0 +1,6 @@ +pr: 104387 +summary: "ESQL: Nested expressions inside stats command" +area: ES|QL +type: enhancement +issues: + - 99828 diff --git a/docs/changelog/104396.yaml b/docs/changelog/104396.yaml new file mode 100644 index 0000000000000..586fdc1b22624 --- /dev/null +++ b/docs/changelog/104396.yaml @@ -0,0 +1,5 @@ +pr: 104396 +summary: Report current master in `PeerFinder` +area: Cluster Coordination +type: enhancement +issues: [] diff --git a/docs/changelog/104406.yaml b/docs/changelog/104406.yaml new file mode 100644 index 0000000000000..d26ef664abc07 --- /dev/null +++ b/docs/changelog/104406.yaml @@ -0,0 +1,5 @@ +pr: 104406 +summary: Support patch transport version from 8.12 +area: Downsampling +type: enhancement +issues: [] diff --git a/docs/changelog/104253.yaml b/docs/changelog/104407.yaml similarity index 91% rename from docs/changelog/104253.yaml rename to docs/changelog/104407.yaml index bacde751e2507..1ce6b6f97f580 100644 --- a/docs/changelog/104253.yaml +++ b/docs/changelog/104407.yaml @@ -1,4 +1,4 @@ -pr: 104253 +pr: 104407 summary: Set read timeout for fetching IMDSv2 token area: Discovery-Plugins type: enhancement diff --git a/docs/changelog/104418.yaml b/docs/changelog/104418.yaml new file mode 100644 index 0000000000000..d27b66cebea87 --- /dev/null +++ b/docs/changelog/104418.yaml @@ -0,0 +1,6 @@ +pr: 104418 +summary: Fix `routing_path` when template has multiple `path_match` and multi-fields +area: TSDB +type: bug +issues: + - 104400 diff --git a/docs/changelog/104460.yaml b/docs/changelog/104460.yaml new file mode 100644 index 0000000000000..c92acdd5cb8ad --- /dev/null +++ b/docs/changelog/104460.yaml @@ -0,0 +1,5 @@ +pr: 104460 +summary: Dynamically adjust node metrics cache expire +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/104481.yaml b/docs/changelog/104481.yaml new file mode 100644 index 0000000000000..5377efdc7109e --- /dev/null +++ b/docs/changelog/104481.yaml @@ -0,0 +1,6 @@ +pr: 104481 +summary: Upgrade bundled JDK to 21.0.2 +area: Packaging +type: upgrade +issues: + - 4449 diff --git a/docs/changelog/104500.yaml
b/docs/changelog/104500.yaml new file mode 100644 index 0000000000000..61c45c6dde3cb --- /dev/null +++ b/docs/changelog/104500.yaml @@ -0,0 +1,5 @@ +pr: 104500 +summary: Thread pool metrics +area: Infra/Core +type: enhancement +issues: [] diff --git a/docs/changelog/104501.yaml b/docs/changelog/104501.yaml new file mode 100644 index 0000000000000..49bbe942be79a --- /dev/null +++ b/docs/changelog/104501.yaml @@ -0,0 +1,5 @@ +pr: 104501 +summary: "X-pack/plugin/apm-data: download geoip DB on pipeline creation" +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/96968.yaml b/docs/changelog/96968.yaml deleted file mode 100644 index 8cc6d4ac4c284..0000000000000 --- a/docs/changelog/96968.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96968 -summary: Allow prefix index naming while reindexing from remote -area: Reindex -type: bug -issues: - - 89120 diff --git a/docs/changelog/98874.yaml b/docs/changelog/98874.yaml deleted file mode 100644 index e3eb7b5acc63f..0000000000000 --- a/docs/changelog/98874.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98874 -summary: Estimate the memory required to deploy trained models more accurately -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/98882.yaml b/docs/changelog/98882.yaml deleted file mode 100644 index 9867f098cfd13..0000000000000 --- a/docs/changelog/98882.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99983 -summary: Use non-deprecated SAML callback URL in tests -area: Authorization -type: enhancement -issues: - - 99985 diff --git a/docs/changelog/98883.yaml b/docs/changelog/98883.yaml deleted file mode 100644 index a8525a432d142..0000000000000 --- a/docs/changelog/98883.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99983 -summary: Use non-deprecated SAML callback URL in SAML smoketests -area: Authorization -type: enhancement -issues: - - 99986 diff --git a/docs/changelog/98916.yaml b/docs/changelog/98916.yaml deleted file mode 100644 index a466e3deba009..0000000000000 --- a/docs/changelog/98916.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98916 -summary: Make knn search a query -area: Vector Search -type: feature -issues: [] diff --git a/docs/changelog/99134.yaml b/docs/changelog/99134.yaml deleted file mode 100644 index 10156b9b30066..0000000000000 --- a/docs/changelog/99134.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99134 -summary: Add ability to create a data stream failure store -area: Data streams -type: feature -issues: [] diff --git a/docs/changelog/99445.yaml b/docs/changelog/99445.yaml deleted file mode 100644 index deea5fbf2423c..0000000000000 --- a/docs/changelog/99445.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99445 -summary: Make cosine similarity faster by storing magnitude and normalizing vectors -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/99702.yaml b/docs/changelog/99702.yaml deleted file mode 100644 index 657ff34e045a8..0000000000000 --- a/docs/changelog/99702.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99702 -summary: Making classname optional in Transport protocol -area: Infra/Plugins -type: bug -issues: - - 98584 diff --git a/docs/changelog/99752.yaml b/docs/changelog/99752.yaml deleted file mode 100644 index c137a563bea39..0000000000000 --- a/docs/changelog/99752.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99752 -summary: Pass shard's primary term to Engine#addSegmentGenerationListener -area: Store -type: enhancement -issues: [] diff --git a/docs/changelog/99852.yaml b/docs/changelog/99852.yaml deleted file mode 100644 index 3a26f17737ae8..0000000000000 --- a/docs/changelog/99852.yaml 
+++ /dev/null @@ -1,5 +0,0 @@ -pr: 99852 -summary: Record more detailed HTTP stats -area: Network -type: enhancement -issues: [] diff --git a/docs/changelog/99963.yaml b/docs/changelog/99963.yaml deleted file mode 100644 index 4f03dceeb22aa..0000000000000 --- a/docs/changelog/99963.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99963 -summary: Aggs error codes part 1 -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/99975.yaml b/docs/changelog/99975.yaml deleted file mode 100644 index a34746c27ec99..0000000000000 --- a/docs/changelog/99975.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99975 -summary: Rename component templates and pipelines according to the new naming conventions -area: Indices APIs -type: enhancement -issues: [] diff --git a/docs/changelog/99984.yaml b/docs/changelog/99984.yaml deleted file mode 100644 index 254845591941d..0000000000000 --- a/docs/changelog/99984.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99984 -summary: Switch fleet's built-in ILM policies to use .actions.rollover.max_primary_shard_size -area: ILM+SLM -type: enhancement -issues: - - 99983 diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc index bbfa41538528a..e1e27be12a36f 100644 --- a/docs/reference/esql/esql-query-api.asciidoc +++ b/docs/reference/esql/esql-query-api.asciidoc @@ -43,6 +43,12 @@ or alias you search. (Optional, string) Separator for CSV results. Defaults to `,`. The API only supports this parameter for CSV responses. +`drop_null_columns`:: +(Optional, boolean) Should columns that are entirely `null` be removed from +the `columns` and `values` portion of the results? Defaults to `false`. If +`true`, the response will include an extra section under the name +`all_columns` which has the name of all columns. + `format`:: (Optional, string) Format for the response. For valid values, refer to <>. @@ -75,17 +81,12 @@ responses. See <>. `columns`:: (array of objects) -Column headings for the search results. Each object is a column. -+ -.Properties of `columns` objects -[%collapsible%open] -===== -`name`:: -(string) Name of the column. - -`type`:: -(string) Data type for the column. -===== +Column `name` and `type` for each column returned in `values`. Each object is a single column. + +`all_columns`:: +(array of objects) +Column `name` and `type` for each queried column. Each object is a single column. This is only +returned if `drop_null_columns` is sent with the request.
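To illustrate the new parameter, here is a rough sketch of a request and the shape of its response (the index, field names, and values are invented, not captured from a real cluster):

[source,console]
----
POST /_query?drop_null_columns=true
{
  "query": "FROM library | KEEP author, publisher | LIMIT 2"
}
----

If every returned row has a `null` `publisher`, that column is dropped from `columns` and `values` but still listed under `all_columns`:

[source,console-result]
----
{
  "all_columns": [
    { "name": "author",    "type": "text" },
    { "name": "publisher", "type": "text" }
  ],
  "columns": [
    { "name": "author", "type": "text" }
  ],
  "values": [
    [ "Frank Herbert" ],
    [ "Vernor Vinge" ]
  ]
}
----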
`rows`:: (array of arrays) diff --git a/docs/reference/esql/functions/types/mv_count.asciidoc b/docs/reference/esql/functions/types/mv_count.asciidoc index 440e66d11096e..a2e7119bab05d 100644 --- a/docs/reference/esql/functions/types/mv_count.asciidoc +++ b/docs/reference/esql/functions/types/mv_count.asciidoc @@ -3,9 +3,11 @@ v | result boolean | integer cartesian_point | integer +cartesian_shape | integer datetime | integer double | integer geo_point | integer +geo_shape | integer integer | integer ip | integer keyword | integer diff --git a/docs/reference/esql/functions/types/mv_first.asciidoc b/docs/reference/esql/functions/types/mv_first.asciidoc index e6c67a454b96b..620c7cf13b771 100644 --- a/docs/reference/esql/functions/types/mv_first.asciidoc +++ b/docs/reference/esql/functions/types/mv_first.asciidoc @@ -3,9 +3,11 @@ v | result boolean | boolean cartesian_point | cartesian_point +cartesian_shape | cartesian_shape datetime | datetime double | double geo_point | geo_point +geo_shape | geo_shape integer | integer ip | ip keyword | keyword diff --git a/docs/reference/esql/functions/types/mv_last.asciidoc b/docs/reference/esql/functions/types/mv_last.asciidoc index e6c67a454b96b..620c7cf13b771 100644 --- a/docs/reference/esql/functions/types/mv_last.asciidoc +++ b/docs/reference/esql/functions/types/mv_last.asciidoc @@ -3,9 +3,11 @@ v | result boolean | boolean cartesian_point | cartesian_point +cartesian_shape | cartesian_shape datetime | datetime double | double geo_point | geo_point +geo_shape | geo_shape integer | integer ip | ip keyword | keyword diff --git a/docs/reference/esql/functions/types/to_string.asciidoc b/docs/reference/esql/functions/types/to_string.asciidoc index 4de4af735b07f..773e396f41373 100644 --- a/docs/reference/esql/functions/types/to_string.asciidoc +++ b/docs/reference/esql/functions/types/to_string.asciidoc @@ -3,9 +3,11 @@ v | result boolean | keyword cartesian_point | keyword +cartesian_shape | keyword datetime | keyword double | keyword geo_point | keyword +geo_shape | keyword integer | keyword ip | keyword keyword | keyword diff --git a/docs/reference/esql/multivalued-fields.asciidoc b/docs/reference/esql/multivalued-fields.asciidoc index 6cb7755b91ce9..871a741d5ee24 100644 --- a/docs/reference/esql/multivalued-fields.asciidoc +++ b/docs/reference/esql/multivalued-fields.asciidoc @@ -201,8 +201,8 @@ POST /_query "columns": [ { "name": "a", "type": "long"}, { "name": "b", "type": "long"}, - { "name": "b+2", "type": "long"}, - { "name": "a+b", "type": "long"} + { "name": "b + 2", "type": "long"}, + { "name": "a + b", "type": "long"} ], "values": [ [1, [1, 2], null, null], @@ -236,8 +236,8 @@ POST /_query "columns": [ { "name": "a", "type": "long"}, { "name": "b", "type": "long"}, - { "name": "b+2", "type": "long"}, - { "name": "a+b", "type": "long"} + { "name": "b + 2", "type": "long"}, + { "name": "a + b", "type": "long"} ], "values": [ [1, 1, 3, 2], diff --git a/docs/reference/ingest/processors.asciidoc b/docs/reference/ingest/processors.asciidoc index 4132773e3d427..8622e0b98602c 100644 --- a/docs/reference/ingest/processors.asciidoc +++ b/docs/reference/ingest/processors.asciidoc @@ -4,7 +4,15 @@ Processor reference ++++ -{es} includes several configurable processors. To get a list of available +An <> is made up of a sequence of processors that are applied to documents as they are ingested into an index. +Each processor performs a specific task, such as filtering, transforming, or enriching data. 
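For illustration, a minimal hypothetical pipeline with two processors (the pipeline and field names are invented):

[source,console]
----
PUT _ingest/pipeline/my-pipeline
{
  "description": "Rename a field, then uppercase the renamed field",
  "processors": [
    {
      "rename": {
        "field": "provider",
        "target_field": "cloud.provider"
      }
    },
    {
      "uppercase": {
        "field": "cloud.provider"
      }
    }
  ]
}
----

Here the `uppercase` processor only works because the `rename` processor has already created `cloud.provider`; reversing the two would fail (or do nothing, depending on `ignore_missing`).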
+ +Each successive processor depends on the output of the previous processor, so the order of processors is important. +The modified documents are indexed into {es} after all processors are applied. + +{es} includes over 40 configurable processors. +The subpages in this section contain reference documentation for each processor. +To get a list of available processors, use the <> API. [source,console] ---- GET _nodes/ingest?filter_path=nodes.*.ingest.processors ---- -The pages in this section contain reference documentation for each processor. +[discrete] +[[ingest-processors-categories]] +=== Ingest processors by category + +We've categorized the available processors on this page and summarized their functions. +This will help you find the right processor for your use case. + +* <> +* <> +* <> +* <> +* <> + +[discrete] +[[ingest-process-category-data-enrichment]] +=== Data enrichment processors + +[discrete] +[[ingest-process-category-data-enrichment-general]] +==== General outcomes + +<>:: +Appends a value to a field. + +<>:: +Points documents to the right time-based index based on a date or timestamp field. + +<>:: +Enriches documents with data from another index. +[TIP] +==== +Refer to <> for detailed examples of how to use the `enrich` processor to add data from your existing indices to incoming documents during ingest. +==== + +<>:: +Uses {ml} to classify and tag text fields. + +[discrete] +[[ingest-process-category-data-enrichment-specific]] +==== Specific outcomes + +<>:: +Parses and indexes binary data, such as PDFs and Word documents. + +<>:: +Converts a location field to a Geo-Point field. + +<>:: +Computes the Community ID for network flow data. + +<>:: +Computes a hash of the document’s content. + +<>:: +Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. + +<>:: +Adds information about the geographical location of an IPv4 or IPv6 address. + +<>:: +Calculates the network direction given a source IP address, destination IP address, and a list of internal networks. + +<>:: +Extracts the registered domain (also known as the effective top-level domain or eTLD), sub-domain, and top-level domain from a fully qualified domain name (FQDN). + +<>:: +Sets user-related details (such as `username`, `roles`, `email`, `full_name`, `metadata`, `api_key`, `realm` and `authentication_type`) from the current authenticated user to the current document by pre-processing the ingest. + +<>:: +Parses a Uniform Resource Identifier (URI) string and extracts its components as an object. + +<>:: +URL-decodes a string. + +<>:: +Parses user-agent strings to extract information about web clients. + +[discrete] +[[ingest-process-category-data-transformation]] +=== Data transformation processors + +[discrete] +[[ingest-process-category-data-transformation-general]] +==== General outcomes + +<>:: +Converts a field in the currently ingested document to a different type, such as converting a string to an integer. + +<>:: +Extracts structured fields out of a single text field within a document. +Unlike the <>, dissect does not use regular expressions. +This makes dissect a simpler and often faster alternative. + +<>:: +Extracts structured fields out of a single text field within a document, using the <> regular expression dialect that supports reusable aliased expressions. + +<>:: +Converts a string field by applying a regular expression and a replacement.
+ +<>:: +Uses the <> rules engine to obscure text in the input document matching the given Grok patterns. + +<>:: +Renames an existing field. + +<>:: +Sets a value on a field. + +[discrete] +[[ingest-process-category-data-transformation-specific]] +==== Specific outcomes + +<>:: +Converts a human-readable byte value to its value in bytes (for example `1kb` becomes `1024`). + +<>:: +Extracts a single line of CSV data from a text field. + +<>:: +Extracts and converts date fields. + +<> processor:: +Expands a field with dots into an object field. + +<>:: +Removes HTML tags from a field. + +<>:: +Joins each element of an array into a single string using a separator character between each element. + +<>:: +Parses messages (or specific event fields) containing key-value pairs. + +<> and <>:: +Converts a string field to lowercase or uppercase. + +<>:: +Splits a field into an array of values. + +<>:: +Trims whitespace from a field. + +[discrete] +[[ingest-process-category-data-filtering]] +=== Data filtering processors + +<>:: +Drops the document without raising any errors. + +<>:: +Removes fields from documents. + +[discrete] +[[ingest-process-category-pipeline-handling]] +=== Pipeline handling processors + +<>:: +Raises an exception. Useful when you expect a pipeline to fail and want to relay a specific message to the requester. + +<>:: +Executes another pipeline. + +<>:: +Reroutes documents to another target index or data stream. + +[discrete] +[[ingest-process-category-array-json-handling]] +=== Array/JSON handling processors + +<>:: +Runs an ingest processor on each element of an array or object. + +<>:: +Converts a JSON string into a structured JSON object. + +<>:: +Runs an inline or stored <> on incoming documents. +The script runs in the {painless}/painless-ingest-processor-context.html[painless `ingest` context]. + +<>:: +Sorts the elements of an array in ascending or descending order. [discrete] [[ingest-process-plugins]] -=== Processor plugins +=== Add additional processors You can install additional processors as {plugins}/ingest.html[plugins]. diff --git a/docs/reference/migration/migrate_8_12.asciidoc b/docs/reference/migration/migrate_8_12.asciidoc index d241a35c686d7..48e45de350890 100644 --- a/docs/reference/migration/migrate_8_12.asciidoc +++ b/docs/reference/migration/migrate_8_12.asciidoc @@ -16,5 +16,62 @@ coming::[8.12.0] [[breaking-changes-8.12]] === Breaking changes -There are no breaking changes in {es} 8.12. +There are no breaking changes in {es} 8.12. + +[discrete] +[[notable-changes-8.12]] +=== Notable changes + +There are notable changes in 8.12 that you need to be aware of. Changes we may consider notable include: + +* Changes to features that are in Technical Preview. +* Changes to log formats. +* Changes to non-public APIs. +* Behaviour changes that repair critical bugs. + + +[discrete] +[[breaking_812_authorization_changes]] +==== Authorization changes + +[[fixed_jwt_principal_from_claims]] +.Fixed JWT principal from claims +[%collapsible] +==== +*Details* + +This changes the format of a JWT's principal before the JWT is actually validated by any JWT realm. The JWT's principal is a convenient way to refer to a JWT that has not yet been verified by a JWT realm. The JWT's principal is printed in the audit and regular logs (notably for auditing authn failures) as well as the smart realm chain reordering optimization. The JWT principal is NOT required to be identical to the JWT-authenticated user's principal, but in general, they should be similar.
Previously, the JWT's principal was built by individual realms in the same way the realms built the authenticated user's principal. This had the advantage that, in simpler JWT realm configurations (e.g. a single JWT realm in the chain), the JWT principal and the authenticated user's principal are very similar. However, the drawback is that, in general, the JWT principal and the user principal can be very different (i.e. in the case where one JWT realm builds the JWT principal and a different one builds the user principal). Another downside is that the (unauthenticated) JWT principal depended on realm ordering, which makes identifying the JWT from its principal dependent on the ES authn realm configuration. This change implements a consistent, fixed logic to build the JWT principal, which depends only on the JWT's claims and not on the ES configuration. + +*Impact* + +Users will observe changed format and values for the `user.name` attribute of `authentication_failed` audit log events, in the JWT (failed) authn case. +==== + +[discrete] +[[breaking_812_java_api_changes]] +==== Java API changes + +[[plugin_createcomponents_method_has_been_refactored_to_take_single_pluginservices_object]] +.Plugin.createComponents method has been refactored to take a single PluginServices object +[%collapsible] +==== +*Details* + +Plugin.createComponents currently takes several different service arguments. The signature of this method changes every time a new service is added. The method has now been modified to take a single interface object that new services are added to. This will reduce API incompatibility issues when a new service is introduced in the future. + +*Impact* + +Plugins that override createComponents will need to be refactored to override the new method on ES 8.12+. +==== + +[discrete] +[[breaking_812_rest_api_changes]] +==== REST API changes + +[[es_ql_pow_function_always_returns_double]] +.[ES|QL] pow function always returns double +[%collapsible] +==== +*Details* + +In ES|QL, the pow function no longer returns the type of its inputs, instead always returning a double. + +*Impact* + +Low. Most queries should continue to function with the change. +==== diff --git a/docs/reference/release-notes/8.12.0.asciidoc b/docs/reference/release-notes/8.12.0.asciidoc index 6355b7c5135db..21941b265ebd2 100644 --- a/docs/reference/release-notes/8.12.0.asciidoc +++ b/docs/reference/release-notes/8.12.0.asciidoc @@ -1,8 +1,400 @@ [[release-notes-8.12.0]] == {es} version 8.12.0 -coming[8.12.0] - Also see <>.
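To make the two breaking changes above concrete, two small sketches follow. First, the plugin migration: this outline assumes a plugin that only needs the client and the thread pool, and accessor names such as `client()` and `threadPool()` on the new `PluginServices` interface; `MyComponent` is invented, so treat this as a sketch rather than a drop-in implementation.

[source,java]
----
import java.util.ArrayList;
import java.util.Collection;

import org.elasticsearch.client.internal.Client;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.threadpool.ThreadPool;

public class MyPlugin extends Plugin {

    // A trivial stand-in for whatever the plugin actually wires up.
    record MyComponent(Client client, ThreadPool threadPool) {}

    // Before 8.12, createComponents took a long list of individual service
    // arguments, and its signature changed whenever a new service appeared.
    // From 8.12 on, every service is reached through one PluginServices object.
    @Override
    public Collection<?> createComponents(PluginServices services) {
        var components = new ArrayList<Object>();
        components.add(new MyComponent(services.client(), services.threadPool()));
        return components;
    }
}
----

Second, the `pow` change can be observed directly (the values are illustrative):

[source,console]
----
POST /_query
{
  "query": "ROW base = 2, exponent = 2 | EVAL result = POW(base, exponent)"
}
----

Even though both inputs are integers, the `result` column now comes back typed as `double` (value `4.0`), where earlier releases returned the type of the inputs.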
+[[breaking-8.12.0]] +[float] +=== Breaking and notable changes + +Authorization:: +* Fixed JWT principal from claims {es-pull}101333[#101333] + +ES|QL:: +* [ES|QL] pow function always returns double {es-pull}102183[#102183] (issue: {es-issue}99055[#99055]) + +Infra/Plugins:: +* Remove Plugin.createComponents method in favour of overload with a PluginServices object {es-pull}101457[#101457] + +[[bug-8.12.0]] +[float] +=== Bug fixes + +Aggregations:: +* Adjust Histogram's bucket accounting to be iteratively {es-pull}102172[#102172] +* Aggs error codes part 1 {es-pull}99963[#99963] +* Skip global ordinals loading if query does not match after rewrite {es-pull}102844[#102844] +* Trigger parent circuit breaker when building scorers in filters aggregation {es-pull}102511[#102511] +* Unwrap `ExecutionException` when loading from cache in `AbstractIndexOrdinalsFieldData` {es-pull}102476[#102476] + +Application:: +* [Connector API] Fix bug in configuration validation parser {es-pull}104198[#104198] +* [Connector API] Fix bug with nullable tooltip field in parser {es-pull}103427[#103427] +* [Connectors API] Fix `ClassCastException` when creating a new sync job {es-pull}103508[#103508] +* [Connectors API] Fix bug with missing TEXT `DisplayType` enum {es-pull}103430[#103430] +* [Connectors API] Handle nullable fields correctly in the `ConnectorSyncJob` parser {es-pull}103183[#103183] +* [Profiling] Query in parallel only if beneficial {es-pull}103061[#103061] +* [Search Applications] Return 400 response when template rendering produces invalid JSON {es-pull}101474[#101474] + +Authentication:: +* Fall through malformed JWTs to subsequent realms in the chain {es-pull}101660[#101660] (issue: {es-issue}101367[#101367]) + +Authorization:: +* Fix cache invalidation on privilege modification {es-pull}102193[#102193] + +Data streams:: +* Use dataset size instead of on-disk size for data stream stats {es-pull}103342[#103342] + +Distributed:: +* Active shards message corrected for search shards {es-pull}102808[#102808] (issue: {es-issue}101896[#101896]) +* Dispatch `ClusterStateAction#buildResponse` to executor {es-pull}103435[#103435] +* Fix listeners in `SharedBlobCacheService.readMultiRegions` {es-pull}101727[#101727] + +Downsampling:: +* Copy counter field properties to downsampled index {es-pull}103580[#103580] (issue: {es-issue}103569[#103569]) +* Fix downsample api by returning a failure in case one or more downsample persistent tasks failed {es-pull}103615[#103615] + +EQL:: +* Cover head/tail commands edge cases and data types coverage {es-pull}101859[#101859] (issue: {es-issue}101724[#101724]) +* Samples should check if the aggregations result is empty or null {es-pull}103574[#103574] + +ES|QL:: +* ESQL: Fix `to_degrees()` returning infinity {es-pull}103209[#103209] (issue: {es-issue}102987[#102987]) +* ESQL: Fix planning of MV_EXPAND with foldable expressions {es-pull}101385[#101385] (issue: {es-issue}101118[#101118]) +* ESQL: Fix rare bug with empty string {es-pull}102350[#102350] (issue: {es-issue}101969[#101969]) +* ESQL: Fix resolution of MV_EXPAND after KEEP * {es-pull}103339[#103339] (issue: {es-issue}103331[#103331]) +* ESQL: Fix single value query {es-pull}102317[#102317] (issue: {es-issue}102298[#102298]) +* ESQL: Improve local folding of aggregates {es-pull}103670[#103670] +* ESQL: Improve pushdown of certain filters {es-pull}103671[#103671] +* ESQL: Narrow catch in convert functions {es-pull}101788[#101788] (issue: {es-issue}100820[#100820]) +* ESQL: Update the use of some user-caused 
exceptions {es-pull}104046[#104046] +* ESQL: remove `time_zone` request parameter {es-pull}102767[#102767] (issue: {es-issue}102159[#102159]) +* ES|QL: Fix NPE on single value detection {es-pull}103150[#103150] (issue: {es-issue}103141[#103141]) +* ES|QL: Improve resolution error management in `mv_expand` {es-pull}102967[#102967] (issue: {es-issue}102964[#102964]) +* Fix layout for MV_EXPAND {es-pull}102916[#102916] (issue: {es-issue}102912[#102912]) +* Fix planning of duplicate aggs {es-pull}102165[#102165] (issue: {es-issue}102083[#102083]) +* `AsyncOperator#isFinished` must never return true on failure {es-pull}104029[#104029] + +Engine:: +* Fix `lastUnsafeSegmentGenerationForGets` for realtime get {es-pull}101700[#101700] + +Geo:: +* Fix geo tile bounding boxes to be consistent with arithmetic method {es-pull}100826[#100826] (issues: {es-issue}92611[#92611], {es-issue}95574[#95574]) + +ILM+SLM:: +* Collect data tiers usage stats more efficiently {es-pull}102140[#102140] (issue: {es-issue}100230[#100230]) + +Indices APIs:: +* Fix template simulate setting application ordering {es-pull}103024[#103024] (issue: {es-issue}103008[#103008]) + +Infra/Core:: +* Cache component versions {es-pull}103408[#103408] (issue: {es-issue}102103[#102103]) +* Fix metric gauge creation model {es-pull}100609[#100609] + +Infra/Node Lifecycle:: +* Wait for reroute before acking put-shutdown {es-pull}103251[#103251] + +Infra/Plugins:: +* Making classname optional in Transport protocol {es-pull}99702[#99702] (issue: {es-issue}98584[#98584]) + +Infra/Scripting:: +* Make IPAddress writeable {es-pull}101093[#101093] (issue: {es-issue}101082[#101082]) + +Infra/Settings:: +* Report full stack trace for non-state file settings transforms {es-pull}101346[#101346] + +Ingest Node:: +* Sending an index name to `DocumentParsingObserver` that is not ever null {es-pull}100862[#100862] + +License:: +* Error log when license verification fails locally {es-pull}102919[#102919] + +Machine Learning:: +* Catch exceptions during `pytorch_inference` startup {es-pull}103873[#103873] +* Ensure the estimated latitude is within the allowed range {ml-pull}2586[#2586] +* Exclude quantiles when fetching model snapshots where possible {es-pull}103530[#103530] +* Fix `frequent_item_sets` aggregation on empty index {es-pull}103116[#103116] (issue: {es-issue}103067[#103067]) +* If trained model download task is in progress, wait for it to finish before executing start trained model deployment {es-pull}102944[#102944] +* Persist data counts on job close before results index refresh {es-pull}101147[#101147] +* Preserve response headers in Datafeed preview {es-pull}103923[#103923] +* Prevent attempts to access non-existent node information during rebalancing {es-pull}103361[#103361] +* Prevent resource over-subscription in model allocation planner {es-pull}100392[#100392] +* Remove dependency on the IPEX library {ml-pull}2605[#2605] and {ml-pull}2606[#2606] +* Start a new trace context before loading a trained model {es-pull}103124[#103124] +* Wait for the model results on graceful shutdown {es-pull}103591[#103591] (issue: {es-issue}103414[#103414]) + +Monitoring:: +* [Monitoring] Dont get cluster state until recovery {es-pull}100565[#100565] + +Network:: +* Ensure the correct `threadContext` for `RemoteClusterNodesAction` {es-pull}101050[#101050] + +Ranking:: +* Add an additional tiebreaker to RRF {es-pull}101847[#101847] (issue: {es-issue}101232[#101232]) + +Reindex:: +* Allow prefix index naming while reindexing from remote 
{es-pull}96968[#96968] (issue: {es-issue}89120[#89120]) + +Search:: +* Add JIT compiler excludes for `computeCommonPrefixLengthAndBuildHistogram` {es-pull}103112[#103112] +* Check that scripts produce correct json in render template action {es-pull}101518[#101518] (issue: {es-issue}101477[#101477]) +* Fix NPE & empty result handling in `CountOnlyQueryPhaseResultConsumer` {es-pull}103203[#103203] +* Fix format string in `OldLuceneVersions` {es-pull}103185[#103185] +* Handle timeout on standalone rewrite calls {es-pull}103546[#103546] +* Introduce Elasticsearch `PostingFormat` based on Lucene 90 posting format using PFOR {es-pull}103601[#103601] (issue: {es-issue}103002[#103002]) +* Restore inter-segment search concurrency when synthetic source is enabled {es-pull}103690[#103690] +* Support complex datemath expressions in index and index alias names {es-pull}100646[#100646] + +Snapshot/Restore:: +* More consistent logging messages for snapshot deletion {es-pull}101024[#101024] +* Reroute on shard snapshot completion {es-pull}101585[#101585] (issue: {es-issue}101514[#101514]) + +TSDB:: +* Throw when wrapping rate agg in `DeferableBucketAggregator` {es-pull}101032[#101032] + +Transform:: +* Add an assertion to the testTransformFeatureReset test case {es-pull}100287[#100287] +* Consider search context missing exceptions as recoverable {es-pull}102602[#102602] +* Consider task cancelled exceptions as recoverable {es-pull}100828[#100828] +* Fix NPE that is thrown by `_update` API {es-pull}104051[#104051] (issue: {es-issue}104048[#104048]) +* Log stacktrace together with log message in order to help debugging {es-pull}101607[#101607] +* Split comma-separated source index strings into separate indices {es-pull}102811[#102811] (issue: {es-issue}99564[#99564]) + +Vector Search:: +* Disallow vectors whose magnitudes will not fit in a float {es-pull}100519[#100519] + +Watcher:: +* Correctly logging watcher history write failures {es-pull}101802[#101802] + +[[enhancement-8.12.0]] +[float] +=== Enhancements + +Aggregations:: +* Check the real memory circuit breaker when building global ordinals {es-pull}102462[#102462] +* Disable concurrency for sampler and diversified sampler {es-pull}102832[#102832] +* Disable parallelism for composite agg against high cardinality fields {es-pull}102644[#102644] +* Enable concurrency for multi terms agg {es-pull}102710[#102710] +* Enable concurrency for scripted metric agg {es-pull}102461[#102461] +* Enable inter-segment concurrency for terms aggs {es-pull}101390[#101390] +* Export circuit breaker trip count as a counter metric {es-pull}101423[#101423] +* Introduce fielddata cache ttl {es-pull}102682[#102682] +* Status codes for Aggregation errors, part 2 {es-pull}100368[#100368] +* Support keyed histograms {es-pull}101826[#101826] (issue: {es-issue}100242[#100242]) + +Allocation:: +* Add more desired balance stats {es-pull}102065[#102065] +* Add undesired shard count {es-pull}101426[#101426] +* Expose reconciliation metrics via APM {es-pull}102244[#102244] + +Application:: +* Calculate CO2 and emission and costs {es-pull}101979[#101979] +* Consider duplicate stacktraces in custom index {es-pull}102292[#102292] +* Enable Universal Profiling as Enterprise feature {es-pull}100333[#100333] +* Include totals in flamegraph response {es-pull}101126[#101126] +* Retrieve stacktrace events from a custom index {es-pull}102020[#102020] +* [Profiling] Notify early about task cancellation {es-pull}102740[#102740] +* [Profiling] Report in status API if docs exist
{es-pull}102735[#102735] + +Authentication:: +* Add ldap user metadata mappings for full name and email {es-pull}102925[#102925] +* Add manage_enrich cluster privilege to kibana_system role {es-pull}101682[#101682] + +Authorization:: +* Remove `auto_configure` privilege for profiling {es-pull}101026[#101026] +* Use `BulkRequest` to store Application Privileges {es-pull}102056[#102056] +* Use non-deprecated SAML callback URL in SAML smoketests {es-pull}99983[#99983] (issue: {es-issue}99986[#99986]) +* Use non-deprecated SAML callback URL in tests {es-pull}99983[#99983] (issue: {es-issue}99985[#99985]) + +CAT APIs:: +* Expose roles by default in cat allocation API {es-pull}101753[#101753] + +CRUD:: +* Cache resolved index for mgets {es-pull}101311[#101311] + +Data streams:: +* Introduce new endpoint to expose data stream lifecycle stats {es-pull}101845[#101845] +* Switch logs data streams to search all fields by default {es-pull}102456[#102456] (issue: {es-issue}99872[#99872]) + +Distributed:: +* Add support for configuring proxy scheme in S3 client settings and EC2 discovery plugin {es-pull}102495[#102495] (issue: {es-issue}101873[#101873]) +* Introduce a `StreamOutput` that counts how many bytes are written to the stream {es-pull}102906[#102906] +* Push s3 requests count via metrics API {es-pull}100383[#100383] +* Record operation purpose for s3 stats collection {es-pull}100236[#100236] + +EQL:: +* Add error logging for *QL {es-pull}101057[#101057] +* Use the eql query filter for the open-pit request {es-pull}103212[#103212] + +ES|QL:: +* ESQL: Add `profile` option {es-pull}102713[#102713] +* ESQL: Alias duplicated aggregations in a stats {es-pull}100642[#100642] (issue: {es-issue}100544[#100544]) +* ESQL: Load more than one field at once {es-pull}102192[#102192] +* ESQL: Load stored fields sequentially {es-pull}102727[#102727] +* ESQL: Load text field from parent keyword field {es-pull}102490[#102490] (issue: {es-issue}102473[#102473]) +* ESQL: Make blocks ref counted {es-pull}100408[#100408] +* ESQL: Make fieldcaps calls lighter {es-pull}102510[#102510] (issues: {es-issue}101763[#101763], {es-issue}102393[#102393]) +* ESQL: More tracking in `BlockHash` impls {es-pull}101488[#101488] +* ESQL: New telemetry commands {es-pull}102937[#102937] +* ESQL: Share constant null Blocks {es-pull}102673[#102673] +* ESQL: Short circuit loading empty doc values {es-pull}102434[#102434] +* ESQL: Support the `_source` metadata field {es-pull}102391[#102391] +* ESQL: Track blocks emitted from lucene {es-pull}101396[#101396] +* ESQL: Track memory from values loaded from lucene {es-pull}101383[#101383] +* Fast path for reading single doc with ordinals {es-pull}102902[#102902] +* Introduce local block factory {es-pull}102901[#102901] +* Load different way {es-pull}101235[#101235] +* Track ESQL enrich memory {es-pull}102184[#102184] +* Track blocks in `AsyncOperator` {es-pull}102188[#102188] +* Track blocks of intermediate state of aggs {es-pull}102562[#102562] +* Track blocks when hashing single multi-valued field {es-pull}102612[#102612] +* Track pages in ESQL enrich request/response {es-pull}102190[#102190] + +Engine:: +* Add static node settings to set default values for max merged segment sizes {es-pull}102208[#102208] + +Geo:: +* Add runtime field of type `geo_shape` {es-pull}100492[#100492] (issue: {es-issue}61299[#61299]) + +Health:: +* Add message field to `HealthPeriodicLogger` and `S3RequestRetryStats` {es-pull}101989[#101989] +* Add non-green indicator names to `HealthPeriodicLogger` message 
{es-pull}102245[#102245] + +ILM+SLM:: +* Health Report API should not return RED for unassigned cold/frozen shards when data is available {es-pull}100776[#100776] +* Switch fleet's built-in ILM policies to use .actions.rollover.max_primary_shard_size {es-pull}99984[#99984] (issue: {es-issue}99983[#99983]) + +Indices APIs:: +* Add executed pipelines to bulk api response {es-pull}100031[#100031] +* Add support for marking component templates as deprecated {es-pull}101148[#101148] (issue: {es-issue}100992[#100992]) +* Allowing non-dynamic index settings to be updated by automatically unassigning shards {es-pull}101723[#101723] +* Rename component templates and pipelines according to the new naming conventions {es-pull}99975[#99975] +* Run `TransportGetAliasesAction` on local node {es-pull}101815[#101815] + +Infra/CLI:: +* Set `ActiveProcessorCount` when `node.processors` is set {es-pull}101846[#101846] + +Infra/Core:: +* Add apm api for asynchronous counters (always increasing) {es-pull}102598[#102598] +* Log errors in `RestResponse` regardless of `error_trace` parameter {es-pull}101066[#101066] (issue: {es-issue}100884[#100884]) + +Infra/Logging:: +* Add status code to `rest.suppressed` log output {es-pull}100990[#100990] + +Ingest Node:: +* Deprecate the unused `elasticsearch_version` field of enrich policy json {es-pull}103013[#103013] +* Optimize `MurmurHash3` {es-pull}101202[#101202] + +Machine Learning:: +* Accept a single or multiple inputs to `_inference` {es-pull}102075[#102075] +* Add basic telemetry for the inference feature {es-pull}102877[#102877] +* Add internal inference action for ml models and services {es-pull}102731[#102731] +* Add prefix strings option to trained models {es-pull}102089[#102089] +* Estimate the memory required to deploy trained models more accurately {es-pull}98874[#98874] +* Improve stability of spike and dip detection for the change point aggregation {es-pull}102637[#102637] +* Include ML processor limits in `_ml/info` response {es-pull}101392[#101392] +* Read scores from downloaded vocabulary for XLM Roberta tokenizers {es-pull}101868[#101868] +* Support for GET all models and by task type in the `_inference` API {es-pull}102806[#102806] +* Upgrade Boost libraries to version 1.83 {ml-pull}2560[#2560] + +Mapping:: +* Improve analyzer reload log message {es-pull}102273[#102273] + +Monitoring:: +* Add memory utilization Kibana metric to the monitoring index templates {es-pull}102810[#102810] +* Added `beat.stats.libbeat.pipeline.queue.max_events` {es-pull}102570[#102570] + +Network:: +* Record more detailed HTTP stats {es-pull}99852[#99852] + +Search:: +* Add metrics to the shared blob cache {es-pull}101577[#101577] +* Add support for Serbian Language Analyzer {es-pull}100921[#100921] +* Add support for `index_filter` to open pit {es-pull}102388[#102388] (issue: {es-issue}99740[#99740]) +* Added metric for cache eviction of entries with non zero frequency {es-pull}100570[#100570] +* Disable inter-segment concurrency when sorting by field {es-pull}101535[#101535] +* Enable query phase parallelism within a single shard {es-pull}101230[#101230] (issue: {es-issue}80693[#80693]) +* Node stats as metrics {es-pull}102248[#102248] +* Optimize `_count` type API requests {es-pull}102888[#102888] + +Security:: +* Expose the `invalidation` field in Get/Query `ApiKey` APIs {es-pull}102472[#102472] +* Make `api_key.delete.interval` a dynamic setting {es-pull}102680[#102680] + +Snapshot/Restore:: +* Fail S3 repository analysis on partial reads {es-pull}102840[#102840] +*
Parallelize stale index deletion {es-pull}100316[#100316] (issue: {es-issue}61513[#61513]) +* Repo analysis of uncontended register behaviour {es-pull}101185[#101185] +* Repo analysis: allow configuration of register ops {es-pull}102051[#102051] +* Repo analysis: verify empty register {es-pull}102048[#102048] + +Stats:: +* Introduce includeShardsStats in the stats request to indicate that we only fetch a summary {es-pull}100466[#100466] (issue: {es-issue}99744[#99744]) +* Set includeShardsStats = false in NodesStatsRequest where the caller does not use shards-level statistics {es-pull}100938[#100938] + +Store:: +* Add methods for adding generation listeners with primary term {es-pull}100899[#100899] +* Allow executing multiple periodic flushes while they are being made durable {es-pull}102571[#102571] +* Pass shard's primary term to Engine#addSegmentGenerationListener {es-pull}99752[#99752] + +Transform:: +* Implement exponential backoff for transform state persistence retrying {es-pull}102512[#102512] (issue: {es-issue}102528[#102528]) +* Make tasks that calculate checkpoints time out {es-pull}101055[#101055] +* Pass source query to `_field_caps` (as `index_filter`) when deducing destination index mappings for better performance {es-pull}102379[#102379] +* Pass transform source query as `index_filter` to `open_point_in_time` request {es-pull}102447[#102447] (issue: {es-issue}101049[#101049]) +* Skip shards that don't match the source query during checkpointing {es-pull}102138[#102138] + +Vector Search:: +* Add vector_operation_count in profile output for knn searches {es-pull}102032[#102032] +* Make cosine similarity faster by storing magnitude and normalizing vectors {es-pull}99445[#99445] + +[[feature-8.12.0]] +[float] +=== New features + +Application:: +* Enable Connectors API as technical preview {es-pull}102994[#102994] +* [Behavioral Analytics] Analytics collections use Data Stream Lifecycle (DSL) instead of Index Lifecycle Management (ILM) for data retention management. Behavioral analytics has traditionally used ILM to manage data retention. Starting with 8.12.0, this will change. Analytics collections created prior to 8.12.0 will continue to use their existing ILM policies, but new analytics collections will be managed using DSL. {es-pull}100033[#100033] + +Authentication:: +* Support patterns for allowed subjects in the JWT realm {es-pull}102426[#102426] + +Cluster Coordination:: +* Add a node feature join barrier. This prevents nodes from joining clusters that do not have all the features already present in the cluster. This ensures that once a feature is supported by all the nodes in a cluster, it will remain supported on that cluster from then on.
This is the corresponding functionality for the version join barrier, but for features + {es-pull}101609[#101609] + +Data streams:: +* Add ability to create a data stream failure store {es-pull}99134[#99134] + +ES|QL:: +* ESQL: emit warnings from single-value functions processing multi-values {es-pull}102417[#102417] (issue: {es-issue}98743[#98743]) +* GEO_POINT and CARTESIAN_POINT type support {es-pull}102177[#102177] + +Infra/Core:: +* Create new cluster state API for querying features present on a cluster {es-pull}100974[#100974] + +Ingest Node:: +* Adding a simulate ingest api {es-pull}101409[#101409] + +Security:: +* Allow granting API keys with JWT as the access_token {es-pull}101904[#101904] + +Vector Search:: +* Add byte quantization for float vectors in HNSW {es-pull}102093[#102093] +* Make knn search a query {es-pull}98916[#98916] + +[[regression-8.12.0]] +[float] +=== Regressions + +Infra/Core:: +* Revert non-semantic `NodeInfo` {es-pull}102636[#102636] + +[[upgrade-8.12.0]] +[float] +=== Upgrades + +Search:: +* Upgrade to Lucene 9.9.1 {es-pull}103549[#103549] + diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index a847d9a306b7c..a68cacec8c10c 100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -821,9 +821,6 @@ Now we have filtered based on the top level `"creation_time"` and only one docum Additionally, if you wanted to extract the nearest passage for a matched document, you can supply <> to the `knn` clause. -NOTE: `inner_hits` for kNN will only ever return a single hit, the nearest passage vector. -Setting `"size"` to any value greater than `1` will have no effect on the results. - NOTE: When using `inner_hits` and multiple `knn` clauses, be sure to specify the <> field. Otherwise, a naming clash can occur and fail the search request. @@ -848,7 +845,8 @@ POST passage_vectors/_search "_source": false, "fields": [ "paragraph.text" - ] + ], + "size": 1 } } } @@ -891,7 +889,7 @@ Now the result will contain the nearest found paragraph when searching. "paragraph": { "hits": { "total": { - "value": 1, + "value": 2, "relation": "eq" }, "max_score": 1.0, @@ -935,7 +933,7 @@ Now the result will contain the nearest found paragraph when searching. "paragraph": { "hits": { "total": { - "value": 1, + "value": 2, "relation": "eq" }, "max_score": 0.9997144, diff --git a/docs/reference/troubleshooting.asciidoc b/docs/reference/troubleshooting.asciidoc index e5ad75e048c1b..de1f9e6c7a608 100644 --- a/docs/reference/troubleshooting.asciidoc +++ b/docs/reference/troubleshooting.asciidoc @@ -58,9 +58,14 @@ fix problems that an {es} deployment might encounter. 
* <> * <> -If none of these solutions relate to your issue, you can still get help: +[discrete] +[[troubleshooting-contact-support]] +=== Contact us + +If none of these guides relate to your issue, or you need further assistance, +then you can contact us as follows: -* For users with an active subscription, you can get help in several ways: +* If you have an active subscription, you have several options: ** Go directly to the http://support.elastic.co[Support Portal] diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 24b81106dcea3..163221315280b 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -1689,46 +1689,25 @@ - - - + + + - - + + - - - + + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - + + + diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java index 1a698b778687c..9ab7412426db8 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java @@ -32,6 +32,13 @@ public class MetricNameValidator { static final int MAX_ELEMENT_LENGTH = 30; static final int MAX_NUMBER_OF_ELEMENTS = 10; + static final Set<String> SKIP_VALIDATION_METRIC_NAMES_DUE_TO_BWC = Set.of( + "searchable_snapshots_cache_fetch_async", + "searchable_snapshots_cache_prewarming", + "security-token-key", + "security-crypto" + ); + private MetricNameValidator() {} /** @@ -42,6 +49,10 @@ private MetricNameValidator() {} */ public static String validate(String metricName) { Objects.requireNonNull(metricName); + + if (skipValidationToBWC(metricName)) { + return metricName; + } validateMaxMetricNameLength(metricName); String[] elements = metricName.split("\\."); @@ -53,6 +64,19 @@ public static String validate(String metricName) { return metricName; } + /** + * For backwards compatibility, some metric names have to skip validation, for instance + * when a thread pool name is too long or contains `-`. + * Thread pools that publish metrics should stay easy to find in the code base, and their + * metrics easy to find in dashboards, under the names the code base already uses. + * Renaming a thread pool would be a breaking change.
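// --- Editorial sketch, not part of the diff: how the BWC skip list behaves. ---
// validate() returns early, skipping every check, when the name contains one of the
// legacy elements listed in SKIP_VALIDATION_METRIC_NAMES_DUE_TO_BWC. Mirroring the
// metric-name shape used in MetricNameValidatorTests:
//
//     MetricNameValidator.validate("es.threadpool.security-token-key.total");
//     // returned unchanged, although '-' would otherwise fail hasOnlyAllowedCharacters
//
//     MetricNameValidator.validate("es.threadpool.write.total");
//     // no skip-list match: subject to the full length, charset, prefix and suffix checks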
+ * + * NOTE: only allow skipping validation when a rename in the code base would be a breaking change + */ + private static boolean skipValidationToBWC(String metricName) { + return SKIP_VALIDATION_METRIC_NAMES_DUE_TO_BWC.stream().anyMatch(m -> metricName.contains(m)); + } + private static void validateMaxMetricNameLength(String metricName) { if (metricName.length() > MAX_METRIC_NAME_LENGTH) { throw new IllegalArgumentException( @@ -108,6 +132,7 @@ private static void hasESPrefix(String[] elements, String name) { private static void perElementValidations(String[] elements, String name) { for (String element : elements) { + hasOnlyAllowedCharacters(element, name); hasNotBreachLengthLimit(element, name); } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java index 64f78d0af494c..9a5479cc65a93 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java @@ -78,6 +78,13 @@ public void testLastElementAllowList() { expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es.somemodule.somemetric.some_other_suffix")); } + public void testSkipValidationDueToBWC() { + for (String partOfMetricName : MetricNameValidator.SKIP_VALIDATION_METRIC_NAMES_DUE_TO_BWC) { + MetricNameValidator.validate("es.threadpool." + partOfMetricName + ".total");// fake metric name, but with the part that skips + // validation + } + } + public static String metricNameWithLength(int length) { int prefixAndSuffix = "es.".length() + ".utilization".length(); assert length > prefixAndSuffix : "length too short"; @@ -99,4 +106,5 @@ public static String metricNameWithLength(int length) { metricName.append("utilization"); return metricName.toString(); } + } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index f34bb96b3eb81..9880e5e9914a8 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; @@ -30,6 +29,7 @@ import org.elasticsearch.action.datastreams.lifecycle.ErrorEntry; import org.elasticsearch.action.datastreams.lifecycle.ExplainIndexDataStreamLifecycle; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; @@ -313,7 +313,7 @@
public void testAutomaticForceMerge() throws Exception { for (int i = 0; i < randomIntBetween(10, 50); i++) { indexDocs(dataStreamName, randomIntBetween(1, 300)); // Make sure the segments get written: - FlushResponse flushResponse = indicesAdmin().flush(new FlushRequest(toBeRolledOverIndex)).actionGet(); + BroadcastResponse flushResponse = indicesAdmin().flush(new FlushRequest(toBeRolledOverIndex)).actionGet(); assertThat(flushResponse.getStatus(), equalTo(RestStatus.OK)); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java index 519499addd77e..694e015b602f8 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java @@ -31,7 +31,7 @@ import java.io.UncheckedIOException; import java.time.Instant; import java.util.ArrayList; -import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Locale; @@ -177,14 +177,18 @@ private List findRoutingPaths(String indexName, Settings allSettings, Li } MappingParserContext parserContext = mapperService.parserContext(); - for (String pathMatch : template.pathMatch()) { + for (Iterator iterator = template.pathMatch().iterator(); iterator.hasNext();) { var mapper = parserContext.typeParser(mappingSnippetType) - // Since FieldMapper.parse modifies the Map passed in (removing entries for "type"), that means - // that only the first pathMatch passed in gets recognized as a time_series_dimension. To counteract - // that, we wrap the mappingSnippet in a new HashMap for each pathMatch instance. - .parse(pathMatch, new HashMap<>(mappingSnippet), parserContext) + .parse(iterator.next(), mappingSnippet, parserContext) .build(MapperBuilderContext.root(false, false)); extractPath(routingPaths, mapper); + if (iterator.hasNext()) { + // Since FieldMapper.parse modifies the Map passed in (removing entries for "type"), that means + // that only the first pathMatch passed in gets recognized as a time_series_dimension. + // To avoid this, each parsing call uses a new mapping snippet. + // Note that a shallow copy of the mappingSnippet map is not enough if there are multi-fields. 
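// --- Editorial sketch, not part of the diff: the mutation being worked around. ---
// FieldMapper.parse consumes entries from the snippet map it is handed (it removes
// "type", and multi-field parsing drills into the nested "fields" maps), so a snippet
// that has been parsed once cannot safely be parsed again. Hypothetical illustration;
// the hunk here instead asks the template for a fresh snippet per pathMatch entry:
//
//     Map<String, Object> snippet = new HashMap<>(Map.of("type", "keyword"));
//     typeParser.parse("labels.first", snippet, parserContext);  // "type" is removed here
//     typeParser.parse("labels.second", snippet, parserContext); // second parse mis-reads the snippet
//
// A shallow copy (new HashMap<>(snippet)) is not enough either, because the nested
// "fields" maps introduced by multi-fields would still be shared and mutated.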
+ mappingSnippet = template.mappingForName(templateName, KeywordFieldMapper.CONTENT_TYPE); + } } } return routingPaths; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index eff40cb1dbe62..8b15d6a4b7bdf 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockAction; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockResponse; @@ -33,6 +32,7 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -1168,7 +1168,7 @@ private void forceMergeIndex(ForceMergeRequest forceMergeRequest, ActionListener logger.info("Data stream lifecycle is issuing a request to force merge index [{}]", targetIndex); client.admin().indices().forceMerge(forceMergeRequest, new ActionListener<>() { @Override - public void onResponse(ForceMergeResponse forceMergeResponse) { + public void onResponse(BroadcastResponse forceMergeResponse) { if (forceMergeResponse.getFailedShards() > 0) { DefaultShardOperationFailedException[] failures = forceMergeResponse.getShardFailures(); String message = Strings.format( diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java index 62d07467d5086..db0e3e5cd6258 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java @@ -493,6 +493,55 @@ public void testGenerateRoutingPathFromDynamicTemplateWithMultiplePathMatchEntri assertEquals(3, routingPathList.size()); } + public void testGenerateRoutingPathFromDynamicTemplateWithMultiplePathMatchEntriesMultiFields() throws Exception { + Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); + String mapping = """ + { + "_doc": { + "dynamic_templates": [ + { + "labels": { + "path_match": ["xprometheus.labels.*", "yprometheus.labels.*"], + "mapping": { + "type": "keyword", + "time_series_dimension": true, + "fields": { + "text": { + "type": "text" + } + } + } + } + } + ], + "properties": { + "host": { + "properties": { + "id": { + "type": "keyword", + "time_series_dimension": true + } + } + }, + "another_field": { + "type": "keyword" + } + } + } + } + """; + Settings result = 
generateTsdbSettings(mapping, now); + assertThat(result.size(), equalTo(3)); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); + assertThat( + IndexMetadata.INDEX_ROUTING_PATH.get(result), + containsInAnyOrder("host.id", "xprometheus.labels.*", "yprometheus.labels.*") + ); + List routingPathList = IndexMetadata.INDEX_ROUTING_PATH.get(result); + assertEquals(3, routingPathList.size()); + } + public void testGenerateRoutingPathFromDynamicTemplate_templateWithNoPathMatch() throws Exception { Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); String mapping = """ diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index befa16573de23..15f526d0a06d6 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.RolloverConditions; @@ -27,6 +26,7 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -578,7 +578,7 @@ public void testForceMerge() throws Exception { // We want this test method to get fake force merge responses, because this is what triggers a cluster state update clientDelegate = (action, request, listener) -> { if (action.name().equals("indices:admin/forcemerge")) { - listener.onResponse(new ForceMergeResponse(5, 5, 0, List.of())); + listener.onResponse(new BroadcastResponse(5, 5, 0, List.of())); } }; String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); @@ -748,7 +748,7 @@ public void testForceMergeRetries() throws Exception { clientDelegate = (action, request, listener) -> { if (action.name().equals("indices:admin/forcemerge")) { listener.onResponse( - new ForceMergeResponse( + new BroadcastResponse( 5, 5, 1, @@ -779,7 +779,7 @@ public void testForceMergeRetries() throws Exception { AtomicInteger forceMergeFailedCount = new AtomicInteger(0); clientDelegate = (action, request, listener) -> { if (action.name().equals("indices:admin/forcemerge")) { - listener.onResponse(new ForceMergeResponse(5, 4, 0, List.of())); + listener.onResponse(new BroadcastResponse(5, 4, 0, List.of())); forceMergeFailedCount.incrementAndGet(); } }; @@ -800,7 +800,7 @@ public void testForceMergeRetries() throws Exception { // For the final data stream lifecycle run, we let 
forcemerge run normally clientDelegate = (action, request, listener) -> { if (action.name().equals("indices:admin/forcemerge")) { - listener.onResponse(new ForceMergeResponse(5, 5, 0, List.of())); + listener.onResponse(new BroadcastResponse(5, 5, 0, List.of())); } }; dataStreamLifecycleService.run(clusterService.state()); @@ -900,7 +900,7 @@ public void testForceMergeDedup() throws Exception { setState(clusterService, state); clientDelegate = (action, request, listener) -> { if (action.name().equals("indices:admin/forcemerge")) { - listener.onResponse(new ForceMergeResponse(5, 5, 0, List.of())); + listener.onResponse(new BroadcastResponse(5, 5, 0, List.of())); } }; for (int i = 0; i < 100; i++) { diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java index c7dbee47ea823..cbb41dfa02c5f 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java @@ -318,7 +318,7 @@ private String mockSearches(String databaseName, int firstChunk, int lastChunk) Map> requestMap = new HashMap<>(); for (int i = firstChunk; i <= lastChunk; i++) { byte[] chunk = data.get(i - firstChunk); - SearchHit hit = new SearchHit(i); + SearchHit hit = SearchHit.unpooled(i); try (XContentBuilder builder = XContentBuilder.builder(XContentType.SMILE.xContent())) { builder.map(Map.of("data", chunk)); builder.flush(); @@ -328,7 +328,7 @@ private String mockSearches(String databaseName, int firstChunk, int lastChunk) throw new UncheckedIOException(ex); } - SearchHits hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f); + SearchHits hits = SearchHits.unpooled(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f); SearchResponse searchResponse = new SearchResponse(hits, null, null, false, null, null, 0, null, 1, 1, 0, 1L, null, null); toRelease.add(searchResponse::decRef); @SuppressWarnings("unchecked") diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 7fdce03252687..915d54c91b259 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -17,13 +17,12 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.flush.FlushAction; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.index.TransportIndexAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -35,6 +34,7 @@ import org.elasticsearch.persistent.PersistentTaskState; 
import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; import org.elasticsearch.threadpool.ThreadPool; @@ -79,7 +79,7 @@ public class GeoIpDownloaderTests extends ESTestCase { public void setup() { httpClient = mock(HttpClient.class); clusterService = mock(ClusterService.class); - threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build()); + threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings( Settings.EMPTY, @@ -178,28 +178,34 @@ public int read() throws IOException { } public void testIndexChunksNoData() throws IOException { - client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { + client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); - flushResponseActionListener.onResponse(mock(FlushResponse.class)); - }); - client.addHandler(RefreshAction.INSTANCE, (RefreshRequest request, ActionListener flushResponseActionListener) -> { - assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); - flushResponseActionListener.onResponse(mock(RefreshResponse.class)); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); }); + client.addHandler( + RefreshAction.INSTANCE, + (RefreshRequest request, ActionListener flushResponseActionListener) -> { + assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); + } + ); InputStream empty = new ByteArrayInputStream(new byte[0]); assertEquals(0, geoIpDownloader.indexChunks("test", empty, 0, "d41d8cd98f00b204e9800998ecf8427e", 0)); } public void testIndexChunksMd5Mismatch() { - client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { - assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); - flushResponseActionListener.onResponse(mock(FlushResponse.class)); - }); - client.addHandler(RefreshAction.INSTANCE, (RefreshRequest request, ActionListener flushResponseActionListener) -> { + client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); - flushResponseActionListener.onResponse(mock(RefreshResponse.class)); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); }); + client.addHandler( + RefreshAction.INSTANCE, + (RefreshRequest request, ActionListener flushResponseActionListener) -> { + assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); + } + ); IOException exception = expectThrows( IOException.class, @@ -232,14 +238,17 @@ public void testIndexChunks() throws IOException { assertEquals(chunk + 15, source.get("chunk")); listener.onResponse(mock(IndexResponse.class)); }); - client.addHandler(FlushAction.INSTANCE, (FlushRequest request, 
ActionListener flushResponseActionListener) -> { - assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); - flushResponseActionListener.onResponse(mock(FlushResponse.class)); - }); - client.addHandler(RefreshAction.INSTANCE, (RefreshRequest request, ActionListener flushResponseActionListener) -> { + client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); - flushResponseActionListener.onResponse(mock(RefreshResponse.class)); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); }); + client.addHandler( + RefreshAction.INSTANCE, + (RefreshRequest request, ActionListener flushResponseActionListener) -> { + assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); + } + ); InputStream big = new ByteArrayInputStream(bigArray); assertEquals(17, geoIpDownloader.indexChunks("test", big, 15, "a67563dfa8f3cba8b8cff61eb989a749", 0)); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index 9bdabcede8ec6..b867fcfb905ea 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -12,7 +12,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -26,7 +25,6 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Iterator; @@ -204,28 +202,6 @@ static final class Fields { static final String STATUS = "status"; } - public static MultiSearchTemplateResponse fromXContext(XContentParser parser) { - // The MultiSearchTemplateResponse is identical to the multi search response so we reuse the parsing logic in multi search response - MultiSearchResponse mSearchResponse = MultiSearchResponse.fromXContext(parser); - try { - org.elasticsearch.action.search.MultiSearchResponse.Item[] responses = mSearchResponse.getResponses(); - Item[] templateResponses = new Item[responses.length]; - int i = 0; - for (org.elasticsearch.action.search.MultiSearchResponse.Item item : responses) { - SearchTemplateResponse stResponse = null; - if (item.getResponse() != null) { - stResponse = new SearchTemplateResponse(); - stResponse.setResponse(item.getResponse()); - item.getResponse().incRef(); - } - templateResponses[i++] = new Item(stResponse, item.getFailure()); - } - return new MultiSearchTemplateResponse(templateResponses, mSearchResponse.getTook().millis()); - } finally { - mSearchResponse.decRef(); - } - } - @Override public String toString() { return Strings.toString(this); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java 
b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java index 03f2fbd3e81a7..86f23397cfadb 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Strings; @@ -95,8 +96,26 @@ private static MultiSearchTemplateResponse createTestInstanceWithFailures() { } @Override - protected MultiSearchTemplateResponse doParseInstance(XContentParser parser) throws IOException { - return MultiSearchTemplateResponse.fromXContext(parser); + protected MultiSearchTemplateResponse doParseInstance(XContentParser parser) { + // The MultiSearchTemplateResponse is identical to the multi search response so we reuse the parsing logic in multi search response + MultiSearchResponse mSearchResponse = MultiSearchResponse.fromXContext(parser); + try { + org.elasticsearch.action.search.MultiSearchResponse.Item[] responses = mSearchResponse.getResponses(); + MultiSearchTemplateResponse.Item[] templateResponses = new MultiSearchTemplateResponse.Item[responses.length]; + int i = 0; + for (org.elasticsearch.action.search.MultiSearchResponse.Item item : responses) { + SearchTemplateResponse stResponse = null; + if (item.getResponse() != null) { + stResponse = new SearchTemplateResponse(); + stResponse.setResponse(item.getResponse()); + item.getResponse().incRef(); + } + templateResponses[i++] = new MultiSearchTemplateResponse.Item(stResponse, item.getFailure()); + } + return new MultiSearchTemplateResponse(templateResponses, mSearchResponse.getTook().millis()); + } finally { + mSearchResponse.decRef(); + } } @Override diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java index 4e3d9baaf5c92..138007c104d2b 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java @@ -97,7 +97,7 @@ public void process(HitContext hit) throws IOException { BytesReference document = percolateQuery.getDocuments().get(slot); leafStoredFields.advanceTo(slot); HitContext subContext = new HitContext( - new SearchHit(slot, "unknown"), + SearchHit.unpooled(slot, "unknown"), percolatorLeafReaderContext, slot, leafStoredFields.storedFields(), diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java index b65d966bd6551..82ec63b785e56 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java @@ -56,7 +56,7 @@ public void testHitsExecute() throws Exception { LeafReaderContext context = reader.leaves().get(0); // A match: { - HitContext hit = new HitContext(new 
SearchHit(0), context, 0, Map.of(), Source.empty(null)); + HitContext hit = new HitContext(SearchHit.unpooled(0), context, 0, Map.of(), Source.empty(null)); PercolateQuery.QueryStore queryStore = ctx -> docId -> new TermQuery(new Term("field", "value")); MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); @@ -87,7 +87,7 @@ public void testHitsExecute() throws Exception { // No match: { - HitContext hit = new HitContext(new SearchHit(0), context, 0, Map.of(), Source.empty(null)); + HitContext hit = new HitContext(SearchHit.unpooled(0), context, 0, Map.of(), Source.empty(null)); PercolateQuery.QueryStore queryStore = ctx -> docId -> new TermQuery(new Term("field", "value")); MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value1", new WhitespaceAnalyzer()); @@ -117,7 +117,7 @@ public void testHitsExecute() throws Exception { // No query: { - HitContext hit = new HitContext(new SearchHit(0), context, 0, Map.of(), Source.empty(null)); + HitContext hit = new HitContext(SearchHit.unpooled(0), context, 0, Map.of(), Source.empty(null)); PercolateQuery.QueryStore queryStore = ctx -> docId -> null; MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java index cc7397637e04a..061d8292b3e5f 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java @@ -14,21 +14,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.common.xcontent.XContentParserUtils; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; /** * Returns the results for a {@link RankEvalRequest}.
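// --- Editorial note, not part of the diff: XContent parsers move into tests. ---
// As the hunks below show, RankEvalResponse (like MultiSearchTemplateResponse earlier
// in this diff) loses its ConstructingObjectParser and fromXContent entry point,
// presumably because production code only ever serialises these responses. The tests,
// which still round-trip XContent, now carry a local copy of the parser:
//
//     try (XContentParser parser = createParser(xContentType.xContent(), bytes)) {
//         RankEvalResponse parsed = PARSER.apply(parser, null); // PARSER is test-local now
//     }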
@@ -111,37 +103,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } - - private static final ParseField DETAILS_FIELD = new ParseField("details"); - private static final ParseField FAILURES_FIELD = new ParseField("failures"); - @SuppressWarnings("unchecked") - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "rank_eval_response", - true, - a -> new RankEvalResponse( - (Double) a[0], - ((List) a[1]).stream().collect(Collectors.toMap(EvalQueryQuality::getId, Function.identity())), - ((List>) a[2]).stream().collect(Collectors.toMap(Tuple::v1, Tuple::v2)) - ) - ); - static { - PARSER.declareDouble(ConstructingObjectParser.constructorArg(), EvalQueryQuality.METRIC_SCORE_FIELD); - PARSER.declareNamedObjects( - ConstructingObjectParser.optionalConstructorArg(), - (p, c, n) -> EvalQueryQuality.fromXContent(p, n), - DETAILS_FIELD - ); - PARSER.declareNamedObjects(ConstructingObjectParser.optionalConstructorArg(), (p, c, n) -> { - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, p.nextToken(), p); - XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, p.nextToken(), p); - Tuple tuple = new Tuple<>(n, ElasticsearchException.failureFromXContent(p)); - XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, p.nextToken(), p); - return tuple; - }, FAILURES_FIELD); - - } - - public static RankEvalResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.apply(parser, null); - } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java index d58c15d4efd74..f57c02bcdcc22 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java @@ -33,12 +33,12 @@ public class RatedSearchHit implements Writeable, ToXContentObject { private final OptionalInt rating; public RatedSearchHit(SearchHit searchHit, OptionalInt rating) { - this.searchHit = searchHit; + this.searchHit = searchHit.asUnpooled(); this.rating = rating; } RatedSearchHit(StreamInput in) throws IOException { - this(SearchHit.readFrom(in), in.readBoolean() ? OptionalInt.of(in.readVInt()) : OptionalInt.empty()); + this(SearchHit.readFrom(in, false), in.readBoolean() ? 
OptionalInt.of(in.readVInt()) : OptionalInt.empty()); } @Override diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java index d4ec7ba9b9ef5..d4d58c3c0ae71 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java @@ -21,10 +21,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -39,7 +43,9 @@ import java.util.List; import java.util.Map; import java.util.OptionalInt; +import java.util.function.Function; import java.util.function.Predicate; +import java.util.stream.Collectors; import static java.util.Collections.singleton; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; @@ -49,6 +55,32 @@ public class RankEvalResponseTests extends ESTestCase { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "rank_eval_response", + true, + a -> new RankEvalResponse( + (Double) a[0], + ((List) a[1]).stream().collect(Collectors.toMap(EvalQueryQuality::getId, Function.identity())), + ((List>) a[2]).stream().collect(Collectors.toMap(Tuple::v1, Tuple::v2)) + ) + ); + static { + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), EvalQueryQuality.METRIC_SCORE_FIELD); + PARSER.declareNamedObjects( + ConstructingObjectParser.optionalConstructorArg(), + (p, c, n) -> EvalQueryQuality.fromXContent(p, n), + new ParseField("details") + ); + PARSER.declareNamedObjects(ConstructingObjectParser.optionalConstructorArg(), (p, c, n) -> { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, p.nextToken(), p); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, p.nextToken(), p); + Tuple tuple = new Tuple<>(n, ElasticsearchException.failureFromXContent(p)); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, p.nextToken(), p); + return tuple; + }, new ParseField("failures")); + } + private static final Exception[] RANDOM_EXCEPTIONS = new Exception[] { new ClusterBlockException(singleton(NoMasterBlockService.NO_MASTER_BLOCK_WRITES)), new CircuitBreakingException("Data too large", 123, 456, CircuitBreaker.Durability.PERMANENT), @@ -117,7 +149,7 @@ public void testXContentParsing() throws IOException { BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, pathsToExclude, random()); RankEvalResponse parsedItem; try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { - parsedItem = RankEvalResponse.fromXContent(parser); + parsedItem = PARSER.apply(parser, null); assertNull(parser.nextToken()); } assertNotSame(testItem, parsedItem); diff --git 
a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java index 7dad062ab3bca..37de70ded462f 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java @@ -209,7 +209,7 @@ public void testDeleteByQuery() throws Exception { .addSort(SORTING_FIELD, SortOrder.DESC), response -> { // Modify a subset of the target documents concurrently - final List originalDocs = Arrays.asList(response.getHits().getHits()); + final List originalDocs = Arrays.asList(response.getHits().asUnpooled().getHits()); docsModifiedConcurrently.addAll(randomSubsetOf(finalConflictingOps, originalDocs)); } ); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java index fcea4618f4cd4..609702a58bf84 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; @@ -24,6 +23,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -554,9 +554,9 @@ void refreshAndFinish(List indexingFailures, List search RefreshRequest refresh = new RefreshRequest(); refresh.indices(destinationIndices.toArray(new String[destinationIndices.size()])); logger.debug("[{}]: refreshing", task.getId()); - bulkClient.admin().indices().refresh(refresh, new ActionListener() { + bulkClient.admin().indices().refresh(refresh, new ActionListener<>() { @Override - public void onResponse(RefreshResponse response) { + public void onResponse(BroadcastResponse response) { finishHim(null, indexingFailures, searchFailures, timedOut); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java index c3cf7cf62f925..c40a4f72bc133 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java @@ -567,8 +567,8 @@ protected RequestWrapper buildRequest(Hit doc) { action.start(); // create a simulated response. 
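// --- Editorial note, not part of the diff: pooled vs. unpooled search hits. ---
// SearchHit/SearchHits are ref-counted and may borrow pooled buffers owned by the
// enclosing SearchResponse, so hits fabricated outside a real response, or retained
// beyond the response lifecycle, use the unpooled variants throughout this diff:
//
//     SearchHit hit = SearchHit.unpooled(0, "id").sourceRef(new BytesArray("{}"));
//     SearchHits hits = SearchHits.unpooled(new SearchHit[] { hit },
//         new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0);
//
// Long-lived holders copy instead, as RatedSearchHit's asUnpooled() and the reindex
// test's response.getHits().asUnpooled() above do.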
- SearchHit hit = new SearchHit(0, "id").sourceRef(new BytesArray("{}")); - SearchHits hits = new SearchHits( + SearchHit hit = SearchHit.unpooled(0, "id").sourceRef(new BytesArray("{}")); + SearchHits hits = SearchHits.unpooled( IntStream.range(0, 100).mapToObj(i -> hit).toArray(SearchHit[]::new), new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0 diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java index 7ac50eb0e7c6c..44e69d3a4cda8 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java @@ -162,8 +162,8 @@ public void testScrollKeepAlive() { private SearchResponse createSearchResponse() { // create a simulated response. - SearchHit hit = new SearchHit(0, "id").sourceRef(new BytesArray("{}")); - SearchHits hits = new SearchHits( + SearchHit hit = SearchHit.unpooled(0, "id").sourceRef(new BytesArray("{}")); + SearchHits hits = SearchHits.unpooled( IntStream.range(0, randomIntBetween(0, 20)).mapToObj(i -> hit).toArray(SearchHit[]::new), new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0 diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index f5c1912d15251..e916b02e62b8e 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.blobstore.ESMockAPIBasedRepositoryIntegTestCase; @@ -41,6 +42,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.function.Predicate; import java.util.regex.Pattern; @@ -196,12 +198,21 @@ private static class AzureHTTPStatsCollectorHandler extends HttpStatsCollectorHa private static final Predicate LIST_PATTERN = Pattern.compile("GET /[a-zA-Z0-9]+/[a-zA-Z0-9]+\\?.+").asMatchPredicate(); private static final Predicate GET_BLOB_PATTERN = Pattern.compile("GET /[a-zA-Z0-9]+/[a-zA-Z0-9]+/.+").asMatchPredicate(); + private final Set seenRequestIds = ConcurrentCollections.newConcurrentSet(); + private AzureHTTPStatsCollectorHandler(HttpHandler delegate) { super(delegate); } @Override protected void maybeTrack(String request, Headers headers) { + // Same request id is a retry + // https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-ncnbi/817da997-30d2-4cd3-972f-a0073e4e98f7 + // Do not count retries since the client side request stats do not track them yet. 
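// --- Editorial sketch, not part of the diff: de-duplicating retried requests. ---
// Azure clients reuse a single x-ms-client-request-id across every retry of one
// logical operation, so remembering the ids seen so far counts each operation once:
//
//     private final Set<String> seenRequestIds = ConcurrentCollections.newConcurrentSet();
//
//     if (false == seenRequestIds.add(headers.getFirst("X-ms-client-request-id"))) {
//         return; // same id as an earlier attempt: a retry, so don't track it again
//     }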
+ // See https://github.com/elastic/elasticsearch/issues/104443 + if (false == seenRequestIds.add(headers.getFirst("X-ms-client-request-id"))) { + return; + } if (GET_BLOB_PATTERN.test(request)) { trackRequest("GetBlob"); } else if (Regex.simpleMatch("HEAD /*/*/*", request)) { diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index e70151cbdf8ee..9ad2c57b7f585 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -15,8 +15,8 @@ import com.sun.net.httpserver.HttpHandler; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.blobstore.BlobContainer; @@ -191,7 +191,7 @@ public void testAbortRequestStats() throws Exception { waitForDocs(nbDocs, indexer); } flushAndRefresh(index); - ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); + BroadcastResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); assertThat(forceMerge.getSuccessfulShards(), equalTo(1)); assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); @@ -234,7 +234,7 @@ public void testMetrics() throws Exception { waitForDocs(nbDocs, indexer); } flushAndRefresh(index); - ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); + BroadcastResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); assertThat(forceMerge.getSuccessfulShards(), equalTo(1)); assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index ba762537537e3..83668cc271922 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -25,6 +25,7 @@ import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; +import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.io.IOException; @@ -84,13 +85,13 @@ protected S3Repository createRepository( @Override public Collection createComponents(PluginServices services) { - service.set(s3Service(services.environment(), services.clusterService().getSettings())); + service.set(s3Service(services.environment(), services.clusterService().getSettings(), services.resourceWatcherService())); this.service.get().refreshAndClearCache(S3ClientSettings.load(settings)); return 
List.of(service); } - S3Service s3Service(Environment environment, Settings nodeSettings) { - return new S3Service(environment, nodeSettings); + S3Service s3Service(Environment environment, Settings nodeSettings, ResourceWatcherService resourceWatcherService) { + return new S3Service(environment, nodeSettings, resourceWatcherService); } @Override diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 1fd31047c735a..fc58482651fa3 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -28,6 +28,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.coordination.stateless.StoreHeartbeatService; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.Strings; @@ -37,6 +38,9 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; +import org.elasticsearch.watcher.FileChangesListener; +import org.elasticsearch.watcher.FileWatcher; +import org.elasticsearch.watcher.ResourceWatcherService; import java.io.Closeable; import java.io.IOException; @@ -68,7 +72,6 @@ class S3Service implements Closeable { TimeValue.timeValueHours(24), Setting.Property.NodeScope ); - private volatile Map clientsCache = emptyMap(); /** @@ -90,12 +93,13 @@ class S3Service implements Closeable { final TimeValue compareAndExchangeTimeToLive; final TimeValue compareAndExchangeAntiContentionDelay; - S3Service(Environment environment, Settings nodeSettings) { + S3Service(Environment environment, Settings nodeSettings, ResourceWatcherService resourceWatcherService) { webIdentityTokenCredentialsProvider = new CustomWebIdentityTokenCredentialsProvider( environment, System::getenv, System::getProperty, - Clock.systemUTC() + Clock.systemUTC(), + resourceWatcherService ); compareAndExchangeTimeToLive = REPOSITORY_S3_CAS_TTL_SETTING.get(nodeSettings); compareAndExchangeAntiContentionDelay = REPOSITORY_S3_CAS_ANTI_CONTENTION_DELAY_SETTING.get(nodeSettings); @@ -333,7 +337,8 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials Environment environment, SystemEnvironment systemEnvironment, JvmEnvironment jvmEnvironment, - Clock clock + Clock clock, + ResourceWatcherService resourceWatcherService ) { // Check whether the original environment variable exists. 
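// --- Editorial note, not part of the diff: reacting to token rotation. ---
// The hunk below attaches a FileWatcher to the web identity token symlink and
// registers it with the ResourceWatcherService, so that when the mounted service
// account token is rotated on disk the STS credentials provider refreshes instead
// of keeping a stale token. In outline:
//
//     var watcher = new FileWatcher(webIdentityTokenFileSymlink);
//     watcher.addListener(/* onFileCreated/onFileChanged -> credentialsProvider.refresh() */);
//     resourceWatcherService.add(watcher, ResourceWatcherService.Frequency.LOW);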
If it doesn't, // the system doesn't support AWS web identity tokens @@ -395,6 +400,31 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials roleSessionName, webIdentityTokenFileSymlink.toString() ).withStsClient(stsClient).build(); + var watcher = new FileWatcher(webIdentityTokenFileSymlink); + watcher.addListener(new FileChangesListener() { + + @Override + public void onFileCreated(Path file) { + onFileChanged(file); + } + + @Override + public void onFileChanged(Path file) { + if (file.equals(webIdentityTokenFileSymlink)) { + LOGGER.debug("WS web identity token file [{}] changed, updating credentials", file); + credentialsProvider.refresh(); + } + } + }); + try { + resourceWatcherService.add(watcher, ResourceWatcherService.Frequency.LOW); + } catch (IOException e) { + throw new ElasticsearchException( + "failed to start watching AWS web identity token file [{}]", + e, + webIdentityTokenFileSymlink + ); + } } catch (Exception e) { stsClient.shutdown(); throw e; diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java index cecb0cd147897..fb775ab31c04d 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java @@ -9,16 +9,21 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; import com.sun.net.httpserver.HttpServer; import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.mocksocket.MockHttpServer; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.junit.After; import org.junit.Assert; import org.mockito.Mockito; @@ -36,12 +41,23 @@ import java.time.format.DateTimeFormatter; import java.util.Arrays; import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.function.Consumer; import java.util.stream.Collectors; public class CustomWebIdentityTokenCredentialsProviderTests extends ESTestCase { private static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole"; private static final String ROLE_NAME = "aws-sdk-java-1651084775908"; + private final TestThreadPool threadPool = new TestThreadPool("test"); + private final Settings settings = Settings.builder().put("resource.reload.interval.low", TimeValue.timeValueMillis(100)).build(); + private final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); + + @After + public void shutdown() throws Exception { + resourceWatcherService.close(); + threadPool.shutdown(); + } private static Environment getEnvironment() throws IOException { Path configDirectory = createTempDir("web-identity-token-test"); @@ -53,7 +69,7 @@ private static Environment getEnvironment() throws IOException { } @SuppressForbidden(reason = "HTTP server is used for testing") - public 
void testCreateWebIdentityTokenCredentialsProvider() throws Exception { + private static HttpServer getHttpServer(Consumer webIdentityTokenCheck) throws IOException { HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); httpServer.createContext("/", exchange -> { try (exchange) { @@ -62,6 +78,7 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { .map(e -> e.split("=")) .collect(Collectors.toMap(e -> e[0], e -> URLDecoder.decode(e[1], StandardCharsets.UTF_8))); assertEquals(ROLE_NAME, params.get("RoleSessionName")); + webIdentityTokenCheck.accept(params.get("WebIdentityToken")); exchange.getResponseHeaders().add("Content-Type", "text/xml; charset=UTF-8"); byte[] response = Strings.format( @@ -97,25 +114,41 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { } }); httpServer.start(); + return httpServer; + } - Environment environment = getEnvironment(); - - // No region is set, but the SDK shouldn't fail because of that - Map environmentVariables = Map.of( - "AWS_WEB_IDENTITY_TOKEN_FILE", - "/var/run/secrets/eks.amazonaws.com/serviceaccount/token", - "AWS_ROLE_ARN", - ROLE_ARN - ); - Map systemProperties = Map.of( + @SuppressForbidden(reason = "HTTP server is used for testing") + private static Map getSystemProperties(HttpServer httpServer) { + return Map.of( "com.amazonaws.sdk.stsMetadataServiceEndpointOverride", "http://" + httpServer.getAddress().getHostName() + ":" + httpServer.getAddress().getPort() ); + } + + private static Map environmentVariables() { + return Map.of("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/eks.amazonaws.com/serviceaccount/token", "AWS_ROLE_ARN", ROLE_ARN); + } + + private static void assertCredentials(AWSCredentials credentials) { + Assert.assertFalse(credentials.getAWSAccessKeyId().isEmpty()); + Assert.assertFalse(credentials.getAWSSecretKey().isEmpty()); + } + + @SuppressForbidden(reason = "HTTP server is used for testing") + public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { + HttpServer httpServer = getHttpServer(s -> assertEquals("YXdzLXdlYi1pZGVudGl0eS10b2tlbi1maWxl", s)); + + Environment environment = getEnvironment(); + + // No region is set, but the SDK shouldn't fail because of that + Map environmentVariables = environmentVariables(); + Map systemProperties = getSystemProperties(httpServer); var webIdentityTokenCredentialsProvider = new S3Service.CustomWebIdentityTokenCredentialsProvider( environment, environmentVariables::get, systemProperties::getOrDefault, - Clock.fixed(Instant.ofEpochMilli(1651084775908L), ZoneOffset.UTC) + Clock.fixed(Instant.ofEpochMilli(1651084775908L), ZoneOffset.UTC), + resourceWatcherService ); try { AWSCredentials credentials = S3Service.buildCredentials( @@ -124,8 +157,64 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { webIdentityTokenCredentialsProvider ).getCredentials(); - Assert.assertEquals("sts_access_key", credentials.getAWSAccessKeyId()); - Assert.assertEquals("secret_access_key", credentials.getAWSSecretKey()); + assertCredentials(credentials); + } finally { + webIdentityTokenCredentialsProvider.shutdown(); + httpServer.stop(0); + } + } + + private static class DelegatingConsumer implements Consumer { + private Consumer delegate; + + private DelegatingConsumer(Consumer delegate) { + this.delegate = delegate; + } + + private void setDelegate(Consumer delegate) { + this.delegate = delegate; + } + + @Override + public 
void accept(String s) { + delegate.accept(s); + } + } + + @SuppressForbidden(reason = "HTTP server is used for testing") + public void testPickUpNewWebIdentityTokenWhenItsChanged() throws Exception { + DelegatingConsumer webIdentityTokenCheck = new DelegatingConsumer(s -> assertEquals("YXdzLXdlYi1pZGVudGl0eS10b2tlbi1maWxl", s)); + + HttpServer httpServer = getHttpServer(webIdentityTokenCheck); + Environment environment = getEnvironment(); + Map environmentVariables = environmentVariables(); + Map systemProperties = getSystemProperties(httpServer); + var webIdentityTokenCredentialsProvider = new S3Service.CustomWebIdentityTokenCredentialsProvider( + environment, + environmentVariables::get, + systemProperties::getOrDefault, + Clock.fixed(Instant.ofEpochMilli(1651084775908L), ZoneOffset.UTC), + resourceWatcherService + ); + try { + AWSCredentialsProvider awsCredentialsProvider = S3Service.buildCredentials( + LogManager.getLogger(S3Service.class), + S3ClientSettings.getClientSettings(Settings.EMPTY, randomAlphaOfLength(8)), + webIdentityTokenCredentialsProvider + ); + assertCredentials(awsCredentialsProvider.getCredentials()); + + var latch = new CountDownLatch(1); + String newWebIdentityToken = "88f84342080d4671a511e10ae905b2b0"; + webIdentityTokenCheck.setDelegate(s -> { + if (s.equals(newWebIdentityToken)) { + latch.countDown(); + } + }); + Files.writeString(environment.configFile().resolve("repository-s3/aws-web-identity-token-file"), newWebIdentityToken); + + safeAwait(latch); + assertCredentials(awsCredentialsProvider.getCredentials()); } finally { webIdentityTokenCredentialsProvider.shutdown(); httpServer.stop(0); @@ -149,7 +238,8 @@ public void testSupportRegionalizedEndpoints() throws Exception { getEnvironment(), environmentVariables::get, systemProperties::getOrDefault, - Clock.systemUTC() + Clock.systemUTC(), + resourceWatcherService ); // We can't verify that webIdentityTokenCredentialsProvider's STS client uses the "https://sts.us-west-2.amazonaws.com" // endpoint in a unit test. 
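A note on how the reload test above verifies its claim: the expected token "YXdzLXdlYi1pZGVudGl0eS10b2tlbi1maWxl" is base64 for "aws-web-identity-token-file", and the mock STS endpoint receives the token file's contents verbatim in the WebIdentityToken request parameter, which is why a plain new string written to the file later is expected back unchanged. The rotation check hinges on the DelegatingConsumer: the test swaps the server-side assertion, rewrites the file through the symlink, and blocks on a latch released the first time the server sees the new value; with resource.reload.interval.low set to 100ms, the LOW-frequency poll fires quickly. A condensed sketch of that handshake, reusing the names from the test; the explicit 10-second timeout is an assumption standing in for what safeAwait enforces:

    // Sketch of the reload handshake (names as in testPickUpNewWebIdentityTokenWhenItsChanged).
    CountDownLatch seenNewToken = new CountDownLatch(1);
    webIdentityTokenCheck.setDelegate(token -> {
        if (token.equals(newWebIdentityToken)) {
            seenNewToken.countDown(); // mock STS observed the rotated token
        }
    });
    // Rotate the token on disk; the watcher notices it on the next LOW-frequency poll.
    Files.writeString(environment.configFile().resolve("repository-s3/aws-web-identity-token-file"), newWebIdentityToken);
    assertTrue("token was never reloaded", seenNewToken.await(10, TimeUnit.SECONDS));
    assertCredentials(awsCredentialsProvider.getCredentials()); // fresh credentials still resolve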
The client depends on hardcoded RegionalEndpointsOptionResolver that in turn depends diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 085d438618a19..28a48c2968f59 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.security.AccessController; @@ -274,8 +275,8 @@ protected void assertSnapshotOrGenericThread() { } @Override - S3Service s3Service(Environment environment, Settings nodeSettings) { - return new ProxyS3Service(environment, nodeSettings); + S3Service s3Service(Environment environment, Settings nodeSettings, ResourceWatcherService resourceWatcherService) { + return new ProxyS3Service(environment, nodeSettings, resourceWatcherService); } public static final class ClientAndCredentials extends AmazonS3Wrapper { @@ -291,8 +292,8 @@ public static final class ProxyS3Service extends S3Service { private static final Logger logger = LogManager.getLogger(ProxyS3Service.class); - ProxyS3Service(Environment environment, Settings nodeSettings) { - super(environment, nodeSettings); + ProxyS3Service(Environment environment, Settings nodeSettings, ResourceWatcherService resourceWatcherService) { + super(environment, nodeSettings, resourceWatcherService); } @Override diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index 34e14dc718818..58c079515aa47 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.blobstore.AbstractBlobContainerRetriesTestCase; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; +import org.elasticsearch.watcher.ResourceWatcherService; import org.hamcrest.Matcher; import org.junit.After; import org.junit.Before; @@ -92,7 +93,7 @@ public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTes @Before public void setUp() throws Exception { shouldErrorOnDns = new AtomicBoolean(false); - service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY) { + service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY, Mockito.mock(ResourceWatcherService.class)) { @Override protected AmazonS3ClientBuilder buildClientBuilder(S3ClientSettings clientSettings) { final AmazonS3ClientBuilder builder = super.buildClientBuilder(clientSettings); @@ -509,6 +510,7 @@ public void testWriteLargeBlobStreaming() throws Exception { assertEquals(blobSize, bytesReceived.get()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104436") public void testReadRetriesAfterMeaningfulProgress() throws Exception { final int 
maxRetries = between(0, 5); final int bufferSizeBytes = scaledRandomIntBetween( diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java index c48e0dc337d30..31bfd3a5e157f 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.watcher.ResourceWatcherService; import org.mockito.Mockito; import java.io.IOException; @@ -178,7 +179,7 @@ public void testRegionCanBeSet() throws IOException { ); assertThat(settings.get("default").region, is("")); assertThat(settings.get("other").region, is(region)); - try (S3Service s3Service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY)) { + try (var s3Service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY, Mockito.mock(ResourceWatcherService.class))) { AmazonS3Client other = (AmazonS3Client) s3Service.buildClient(settings.get("other")); assertThat(other.getSignerRegionOverride(), is(region)); } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index ab5edc4608bfd..0a92ed0a28973 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.hamcrest.Matchers; import org.mockito.Mockito; @@ -45,8 +46,8 @@ public void shutdown() { private static class DummyS3Service extends S3Service { - DummyS3Service(Environment environment) { - super(environment, Settings.EMPTY); + DummyS3Service(Environment environment, ResourceWatcherService resourceWatcherService) { + super(environment, Settings.EMPTY, resourceWatcherService); } @Override @@ -125,7 +126,7 @@ private S3Repository createS3Repo(RepositoryMetadata metadata) { return new S3Repository( metadata, NamedXContentRegistry.EMPTY, - new DummyS3Service(Mockito.mock(Environment.class)), + new DummyS3Service(Mockito.mock(Environment.class), Mockito.mock(ResourceWatcherService.class)), BlobStoreTestUtil.mockClusterService(), MockBigArrays.NON_RECYCLING_INSTANCE, new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java index bbdeea6d87631..33e56bcf2180b 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import 
org.elasticsearch.test.ESTestCase; +import org.elasticsearch.watcher.ResourceWatcherService; import org.mockito.Mockito; import java.io.IOException; @@ -18,7 +19,11 @@ public class S3ServiceTests extends ESTestCase { public void testCachedClientsAreReleased() throws IOException { - final S3Service s3Service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY); + final S3Service s3Service = new S3Service( + Mockito.mock(Environment.class), + Settings.EMPTY, + Mockito.mock(ResourceWatcherService.class) + ); final Settings settings = Settings.builder().put("endpoint", "http://first").build(); final RepositoryMetadata metadata1 = new RepositoryMetadata("first", "s3", settings); final RepositoryMetadata metadata2 = new RepositoryMetadata("second", "s3", settings); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java index 224436a388ce5..3e74a74dbd49c 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockSocket; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportSettings; @@ -51,7 +52,7 @@ public class Netty4SizeHeaderFrameDecoderTests extends ESTestCase { @Before public void startThreadPool() { - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); NetworkService networkService = new NetworkService(Collections.emptyList()); PageCacheRecycler recycler = new MockPageCacheRecycler(Settings.EMPTY); nettyTransport = new Netty4Transport( diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 2d4313db1b8ff..b57d6bce26633 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -102,9 +102,6 @@ tasks.named("test").configure { } else { nonInputProperties.systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" } - if (BuildParams.random.nextBoolean()) { - env 'AWS_METADATA_SERVICE_TIMEOUT', '1' - } } tasks.named("check").configure { diff --git a/plugins/examples/rest-handler/src/main/java/org/elasticsearch/example/resthandler/ExampleRestHandlerPlugin.java b/plugins/examples/rest-handler/src/main/java/org/elasticsearch/example/resthandler/ExampleRestHandlerPlugin.java index 59131731d25e1..e142ba80147e0 100644 --- a/plugins/examples/rest-handler/src/main/java/org/elasticsearch/example/resthandler/ExampleRestHandlerPlugin.java +++ b/plugins/examples/rest-handler/src/main/java/org/elasticsearch/example/resthandler/ExampleRestHandlerPlugin.java @@ -10,6 +10,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; @@ -28,6 +29,7 @@ public class ExampleRestHandlerPlugin 
extends Plugin implements ActionPlugin { @Override public List getRestHandlers(final Settings settings, + final NamedWriteableRegistry namedWriteableRegistry, final RestController restController, final ClusterSettings clusterSettings, final IndexScopedSettings indexScopedSettings, diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index e709b838a26f3..d91f7cf3e9a8d 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.IOUtils; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; @@ -313,6 +314,14 @@ protected ClientYamlTestExecutionContext createRestTestExecutionContext( public boolean clusterHasFeature(String featureId) { return testFeatureService.clusterHasFeature(featureId) && searchTestFeatureService.clusterHasFeature(featureId); } + + @Override + public Set getAllSupportedFeatures() { + return Sets.intersection( + testFeatureService.getAllSupportedFeatures(), + searchTestFeatureService.getAllSupportedFeatures() + ); + } }; final Set combinedOsSet = Stream.concat(osSet.stream(), Stream.of(searchOs)).collect(Collectors.toSet()); final Set combinedNodeVersions = Stream.concat(nodesVersions.stream(), searchNodeVersions.stream()) diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java index a331d6f54cb4a..ce11112bd4416 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.IOUtils; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; @@ -298,6 +299,14 @@ protected ClientYamlTestExecutionContext createRestTestExecutionContext( public boolean clusterHasFeature(String featureId) { return testFeatureService.clusterHasFeature(featureId) && searchTestFeatureService.clusterHasFeature(featureId); } + + @Override + public Set getAllSupportedFeatures() { + return Sets.intersection( + testFeatureService.getAllSupportedFeatures(), + searchTestFeatureService.getAllSupportedFeatures() + ); + } }; final Set combinedOsSet = Stream.concat(osSet.stream(), Stream.of(searchOs)).collect(Collectors.toSet()); final Set combinedNodeVersions = Stream.concat(nodesVersions.stream(), searchNodeVersions.stream()) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 
16209a73826ca..99b40b0f5c101 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -605,7 +605,7 @@ public void testShrinkAfterUpgrade() throws IOException { *
  • Make sure the document count is correct * */ - public void testRollover() throws IOException { + public void testRollover() throws Exception { if (isRunningAgainstOldCluster()) { client().performRequest( newXContentRequest( @@ -637,9 +637,12 @@ public void testRollover() throws IOException { ) ); - assertThat( - EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices?v")).getEntity()), - containsString("testrollover-000002") + // assertBusy to work around https://github.com/elastic/elasticsearch/issues/104371 + assertBusy( + () -> assertThat( + EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices?v&error_trace")).getEntity()), + containsString("testrollover-000002") + ) ); } diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 9c5415f1d5ea9..ca9528005758a 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -17,8 +17,8 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.tests.util.TimeUnits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.aggregations.pipeline.DerivativePipelineAggregationBuilder; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -199,7 +199,7 @@ private void indexDocuments(String idPrefix) throws IOException, InterruptedExce assertTrue(latch.await(30, TimeUnit.SECONDS)); - RefreshResponse refreshResponse = refresh(INDEX_NAME); + BroadcastResponse refreshResponse = refresh(INDEX_NAME); ElasticsearchAssertions.assertNoFailures(refreshResponse); } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ClusterFeatureMigrationIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ClusterFeatureMigrationIT.java index 2d8ff8b747323..0487b282179a9 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ClusterFeatureMigrationIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ClusterFeatureMigrationIT.java @@ -13,7 +13,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.features.FeatureService; -import org.junit.BeforeClass; +import org.junit.Before; import java.io.IOException; import java.util.List; @@ -26,11 +26,11 @@ public class ClusterFeatureMigrationIT extends ParameterizedRollingUpgradeTestCase { - @BeforeClass - public static void checkMigrationVersion() { - assumeTrue( + @Before + public void checkMigrationVersion() { + assumeFalse( "This checks migrations from before cluster features were introduced", - getOldClusterVersion().before(FeatureService.CLUSTER_FEATURES_ADDED_VERSION) + oldClusterHasFeature(FeatureService.FEATURES_SUPPORTED) ); } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index 230ab39610b1e..1e9d3d41e6d24 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ 
b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -16,11 +16,11 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.cluster.metadata.DesiredNodeWithStatus; -import org.elasticsearch.cluster.metadata.MetadataFeatures; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; @@ -48,13 +48,11 @@ private enum ProcessorsPrecision { } public void testUpgradeDesiredNodes() throws Exception { - assumeTrue("Desired nodes was introduced in 8.1", getOldClusterVersion().onOrAfter(Version.V_8_1_0)); + assumeTrue("Desired nodes was introduced in 8.1", oldClusterHasFeature(RestTestLegacyFeatures.DESIRED_NODE_API_SUPPORTED)); - var featureVersions = new MetadataFeatures().getHistoricalFeatures(); - - if (getOldClusterVersion().onOrAfter(featureVersions.get(DesiredNode.DOUBLE_PROCESSORS_SUPPORTED))) { + if (oldClusterHasFeature(DesiredNode.DOUBLE_PROCESSORS_SUPPORTED)) { assertUpgradedNodesCanReadDesiredNodes(); - } else if (getOldClusterVersion().onOrAfter(featureVersions.get(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED))) { + } else if (oldClusterHasFeature(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED)) { assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent(); } else { assertDesiredNodesWithFloatProcessorsAreRejectedInOlderVersions(); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java index d5b5e24e2ccde..273196f392064 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.test.ListMatcher; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; @@ -229,7 +230,7 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio } public void testTsdb() throws IOException { - assumeTrue("indexing time series indices changed in 8.2.0", getOldClusterVersion().onOrAfter(Version.V_8_2_0)); + assumeTrue("indexing time series indices changed in 8.2.0", oldClusterHasFeature(RestTestLegacyFeatures.TSDB_NEW_INDEX_FORMAT)); StringBuilder bulk = new StringBuilder(); if (isOldCluster()) { @@ -337,7 +338,7 @@ private void assertTsdbAgg(Matcher... 
expected) throws IOException { } public void testSyntheticSource() throws IOException { - assumeTrue("added in 8.4.0", getOldClusterVersion().onOrAfter(Version.V_8_4_0)); + assumeTrue("added in 8.4.0", oldClusterHasFeature(RestTestLegacyFeatures.SYNTHETIC_SOURCE_SUPPORTED)); if (isOldCluster()) { Request createIndex = new Request("PUT", "/synthetic"); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java index 5a2c4c783ec85..43bc8eacac98c 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java @@ -15,6 +15,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; @@ -69,6 +70,7 @@ public static Iterable parameters() { } private static final Set upgradedNodes = new HashSet<>(); + private static final Set oldClusterFeatures = new HashSet<>(); private static boolean upgradeFailed = false; private static IndexVersion oldIndexVersion; @@ -78,6 +80,13 @@ protected ParameterizedRollingUpgradeTestCase(@Name("upgradedNodes") int upgrade this.requestedUpgradedNodes = upgradedNodes; } + @Before + public void extractOldClusterFeatures() { + if (isOldCluster() && oldClusterFeatures.isEmpty()) { + oldClusterFeatures.addAll(testFeatureService.getAllSupportedFeatures()); + } + } + @Before public void extractOldIndexVersion() throws Exception { if (oldIndexVersion == null && upgradedNodes.isEmpty()) { @@ -138,13 +147,24 @@ public void upgradeNode() throws Exception { public static void resetNodes() { oldIndexVersion = null; upgradedNodes.clear(); + oldClusterFeatures.clear(); upgradeFailed = false; } + @Deprecated // Use the new testing framework and oldClusterHasFeature(feature) instead protected static org.elasticsearch.Version getOldClusterVersion() { return org.elasticsearch.Version.fromString(OLD_CLUSTER_VERSION); } + protected static boolean oldClusterHasFeature(String featureId) { + assert oldClusterFeatures.isEmpty() == false; + return oldClusterFeatures.contains(featureId); + } + + protected static boolean oldClusterHasFeature(NodeFeature feature) { + return oldClusterHasFeature(feature.id()); + } + protected static IndexVersion getOldClusterIndexVersion() { assert oldIndexVersion != null; return oldIndexVersion; diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java index 4b765849e6ea9..ef80643c82c0d 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java @@ -13,7 +13,6 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.util.EntityUtils; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import 
org.elasticsearch.cluster.metadata.IndexMetadata; @@ -24,6 +23,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -49,13 +49,10 @@ public SnapshotBasedRecoveryIT(@Name("upgradedNodes") int upgradedNodes) { } public void testSnapshotBasedRecovery() throws Exception { - - assumeFalse( - "Cancel shard allocation command is broken for initial desired balance versions and might allocate shard " - + "on the node where it is not supposed to be. Fixed by https://github.com/elastic/elasticsearch/pull/93635", - getOldClusterVersion() == Version.V_8_6_0 - || getOldClusterVersion() == Version.V_8_6_1 - || getOldClusterVersion() == Version.V_8_7_0 + assumeTrue( + "Cancel shard allocation command is broken for initial versions of the desired_balance allocator", + oldClusterHasFeature(RestTestLegacyFeatures.DESIRED_BALANCED_ALLOCATOR_SUPPORTED) == false + || oldClusterHasFeature(RestTestLegacyFeatures.DESIRED_BALANCED_ALLOCATOR_FIXED) ); final String indexName = "snapshot_based_recovery"; diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java index b42646164b335..3ce0fc79087c2 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java @@ -10,11 +10,11 @@ import com.carrotsearch.randomizedtesting.annotations.Name; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.FormatNames; import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import java.io.IOException; import java.time.Instant; @@ -130,10 +130,7 @@ public TsdbIT(@Name("upgradedNodes") int upgradedNodes) { """; public void testTsdbDataStream() throws Exception { - assumeTrue( - "Skipping version [" + getOldClusterVersion() + "], because TSDB was GA-ed in 8.7.0", - getOldClusterVersion().onOrAfter(Version.V_8_7_0) - ); + assumeTrue("TSDB was GA-ed in 8.7.0", oldClusterHasFeature(RestTestLegacyFeatures.TSDB_GENERALLY_AVAILABLE)); String dataStreamName = "k8s"; if (isOldCluster()) { final String INDEX_TEMPLATE = """ @@ -159,8 +156,9 @@ public void testTsdbDataStream() throws Exception { public void testTsdbDataStreamWithComponentTemplate() throws Exception { assumeTrue( - "Skipping version [" + getOldClusterVersion() + "], because TSDB was GA-ed in 8.7.0 and bug was fixed in 8.11.0", - getOldClusterVersion().onOrAfter(Version.V_8_7_0) && getOldClusterVersion().before(Version.V_8_11_0) + "TSDB was GA-ed in 8.7.0 and bug was fixed in 8.11.0", + oldClusterHasFeature(RestTestLegacyFeatures.TSDB_GENERALLY_AVAILABLE) + && (oldClusterHasFeature(RestTestLegacyFeatures.TSDB_EMPTY_TEMPLATE_FIXED) == false) ); String dataStreamName = "template-with-component-template"; if (isOldCluster()) { diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java index 9647bfb739164..3af344051030b 100644 --- 
a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java @@ -10,13 +10,13 @@ import com.carrotsearch.randomizedtesting.annotations.Name; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import java.io.IOException; import java.util.Map; @@ -42,10 +42,7 @@ public void testOldIndexSettings() throws Exception { Request createTestIndex = new Request("PUT", "/" + INDEX_NAME); createTestIndex.setJsonEntity("{\"settings\": {\"index.indexing.slowlog.level\": \"WARN\"}}"); createTestIndex.setOptions(expectWarnings(EXPECTED_WARNING)); - if (getOldClusterVersion().before(Version.V_8_0_0)) { - // create index with settings no longer valid in 8.0 - client().performRequest(createTestIndex); - } else { + if (oldClusterHasFeature(RestTestLegacyFeatures.INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED)) { assertTrue( expectThrows(ResponseException.class, () -> client().performRequest(createTestIndex)).getMessage() .contains("unknown setting [index.indexing.slowlog.level]") @@ -53,12 +50,15 @@ public void testOldIndexSettings() throws Exception { Request createTestIndex1 = new Request("PUT", "/" + INDEX_NAME); client().performRequest(createTestIndex1); + } else { + // create index with settings no longer valid in 8.0 + client().performRequest(createTestIndex); } // add some data Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - if (getOldClusterVersion().before(Version.V_8_0_0)) { + if (oldClusterHasFeature(RestTestLegacyFeatures.INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED) == false) { bulk.setOptions(expectWarnings(EXPECTED_WARNING)); } bulk.setJsonEntity(Strings.format(""" @@ -70,7 +70,7 @@ public void testOldIndexSettings() throws Exception { // add some more data Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - if (getOldClusterVersion().before(Version.V_8_0_0)) { + if (oldClusterHasFeature(RestTestLegacyFeatures.INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED) == false) { bulk.setOptions(expectWarnings(EXPECTED_WARNING)); } bulk.setJsonEntity(Strings.format(""" @@ -79,7 +79,7 @@ public void testOldIndexSettings() throws Exception { """, INDEX_NAME)); client().performRequest(bulk); } else { - if (getOldClusterVersion().before(Version.V_8_0_0)) { + if (oldClusterHasFeature(RestTestLegacyFeatures.INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED) == false) { Request createTestIndex = new Request("PUT", "/" + INDEX_NAME + "/_settings"); // update index settings should work createTestIndex.setJsonEntity("{\"index.indexing.slowlog.level\": \"INFO\"}"); @@ -117,7 +117,7 @@ private void assertCount(String index, int countAtLeast) throws IOException { public static void updateIndexSettingsPermittingSlowlogDeprecationWarning(String index, Settings.Builder settings) throws IOException { Request request = new Request("PUT", "/" + index + "/_settings"); request.setJsonEntity(org.elasticsearch.common.Strings.toString(settings.build())); - if (getOldClusterVersion().before(Version.V_7_17_9)) { + if (oldClusterHasFeature(RestTestLegacyFeatures.DEPRECATION_WARNINGS_LEAK_FIXED) == false) { // There 
is a bug (fixed in 7.17.9 and 8.7.0) where deprecation warnings could leak into ClusterApplierService#applyChanges // Below warnings are set (and leaking) from an index in this test case request.setOptions(expectVersionSpecificWarnings(v -> { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query.json index a2bcf67e8611c..85a2a46c8335d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query.json @@ -29,6 +29,11 @@ "type":"string", "description":"The character to use between values within a CSV row. Only valid for the csv format.", "default":false + }, + "drop_null_columns": { + "type": "boolean", + "description": "Should entirely null columns be removed from the results? Their name and type will be returned in a new `all_columns` section.", + "default": false } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_get.json index bf38522cfb448..c4670758f7fe9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_get.json @@ -33,6 +33,11 @@ "keep_alive": { "type": "time", "description": "Specify the time interval in which the results (partial or final) for this search will be available" + }, + "drop_null_columns": { + "type": "boolean", + "description": "Should entirely null columns be removed from the results? Their name and type will be returned in a new `all_columns` section.", + "default": false } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.query.json index 8810746851468..573fde5d9a9cd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.query.json @@ -29,6 +29,11 @@ "type":"string", "description":"The character to use between values within a CSV row. Only valid for the csv format.", "default":false + }, + "drop_null_columns": { + "type": "boolean", + "description": "Should entirely null columns be removed from the results? 
Their name and type will be returned in a new `all_columns` section.", + "default": false } }, "body":{ diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml index f44461e7b8143..c69e22d274c8e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml @@ -6,6 +6,9 @@ setup: indices.create: index: test body: + settings: + index: + number_of_shards: 2 mappings: properties: name: @@ -135,6 +138,172 @@ setup: - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} - match: {hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0"} --- +"nested kNN search inner_hits size > 1": + - skip: + version: ' - 8.12.99' + reason: 'inner_hits on nested kNN search added in 8.13' + + - do: + index: + index: test + id: "4" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14.8, -156.0 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 1.8, -15.0 ] + + - do: + index: + index: test + id: "5" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14.8, -156.0 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 1.8, -15.0 ] + + - do: + index: + index: test + id: "6" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14.8, -156.0 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 1.8, -15.0 ] + - do: + indices.refresh: { } + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 3 + num_candidates: 5 + inner_hits: {size: 2, fields: ["nested.paragraph_id"], _source: false} + + - match: {hits.total.value: 3} + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - length: { hits.hits.1.inner_hits.nested.hits.hits: 2 } + - length: { hits.hits.2.inner_hits.nested.hits.hits: 2 } + + - match: { hits.hits.0.fields.name.0: "moose.jpg" } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 5 + num_candidates: 5 + inner_hits: {size: 2, fields: ["nested.paragraph_id"], _source: false} + + - match: {hits.total.value: 5} + # All these initial matches are "moose.jpg", which has 3 nested vectors, but two are closest + - match: {hits.hits.0.fields.name.0: "moose.jpg"} + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.0.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.1.fields.name.0: "moose.jpg"} + - length: { hits.hits.1.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.1.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.1.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.2.fields.name.0: "moose.jpg"} + - length: { hits.hits.2.inner_hits.nested.hits.hits: 2 } + - match: { 
hits.hits.2.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.2.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.3.fields.name.0: "moose.jpg"} + - length: { hits.hits.3.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.3.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.3.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + # Rabbit only has one passage vector + - match: {hits.hits.4.fields.name.0: "rabbit.jpg"} + - length: { hits.hits.4.inner_hits.nested.hits.hits: 1 } + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + k: 3 + num_candidates: 3 + filter: {term: {name: "cow.jpg"}} + inner_hits: {size: 3, fields: ["nested.paragraph_id"], _source: false} + + - match: {hits.total.value: 1} + - match: { hits.hits.0._id: "1" } + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.0.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "1" } +--- +"nested kNN search inner_hits & boosting": + - skip: + version: ' - 8.12.99' + reason: 'inner_hits on nested kNN search added in 8.13' + features: close_to + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 3 + num_candidates: 5 + inner_hits: {size: 2, fields: ["nested.paragraph_id"], _source: false} + + - close_to: { hits.hits.0._score: {value: 0.00909090, error: 0.00001} } + - close_to: { hits.hits.0.inner_hits.nested.hits.hits.0._score: {value: 0.00909090, error: 0.00001} } + - close_to: { hits.hits.1._score: {value: 0.0021519717, error: 0.00001} } + - close_to: { hits.hits.1.inner_hits.nested.hits.hits.0._score: {value: 0.0021519717, error: 0.00001} } + - close_to: { hits.hits.2._score: {value: 0.00001, error: 0.00001} } + - close_to: { hits.hits.2.inner_hits.nested.hits.hits.0._score: {value: 0.00001, error: 0.00001} } + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 3 + num_candidates: 5 + boost: 2 + inner_hits: {size: 2, fields: ["nested.paragraph_id"], _source: false} + - close_to: { hits.hits.0._score: {value: 0.0181818, error: 0.00001} } + - close_to: { hits.hits.0.inner_hits.nested.hits.hits.0._score: {value: 0.0181818, error: 0.00001} } + - close_to: { hits.hits.1._score: {value: 0.0043039434, error: 0.00001} } + - close_to: { hits.hits.1.inner_hits.nested.hits.hits.0._score: {value: 0.0043039434, error: 0.00001} } + - close_to: { hits.hits.2._score: {value: 0.00002, error: 0.00001} } + - close_to: { hits.hits.2.inner_hits.nested.hits.hits.0._score: {value: 0.00002, error: 0.00001} } +--- "nested kNN search inner_hits & profiling": - skip: version: ' - 8.12.99' @@ -144,7 +313,6 @@ setup: index: test body: profile: true - _source: false fields: [ "name" ] knn: field: nested.vector diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml index 435291b454d08..5d07c0c8b5f9d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml +++ 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml @@ -186,7 +186,6 @@ setup: - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } --- - "nested kNN search post-filtered on nested fields DOES NOT work": - do: search: @@ -211,3 +210,112 @@ setup: # TODO: fix it on Lucene level so nested knn respects num_candidates # or do pre-filtering - match: {hits.total.value: 0} +--- +"nested kNN search inner_hits size > 1": + - skip: + version: ' - 8.12.99' + reason: 'inner_hits on nested kNN search added in 8.13' + + - do: + index: + index: test + id: "4" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14.8, -156.0 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 1.8, -15.0 ] + + - do: + index: + index: test + id: "5" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14.8, -156.0 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 1.8, -15.0 ] + + - do: + index: + index: test + id: "6" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14.8, -156.0 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 1.8, -15.0 ] + - do: + indices.refresh: { } + + - do: + search: + index: test + size: 3 + body: + fields: [ "name" ] + query: + nested: + path: nested + query: + knn: + field: nested.vector + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + num_candidates: 5 + inner_hits: { size: 2, "fields": [ "nested.paragraph_id" ], _source: false } + + - match: {hits.total.value: 5} + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - length: { hits.hits.1.inner_hits.nested.hits.hits: 2 } + - length: { hits.hits.2.inner_hits.nested.hits.hits: 2 } + + + - do: + search: + index: test + size: 5 + body: + fields: [ "name" ] + query: + nested: + path: nested + query: + knn: + field: nested.vector + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + num_candidates: 5 + inner_hits: { size: 2, "fields": [ "nested.paragraph_id" ], _source: false } + + - match: {hits.total.value: 5} + # All these initial matches are "moose.jpg", which has 3 nested vectors, but two are closest + - match: {hits.hits.0.fields.name.0: "moose.jpg"} + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.0.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.1.fields.name.0: "moose.jpg"} + - length: { hits.hits.1.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.1.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.1.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.2.fields.name.0: "moose.jpg"} + - length: { hits.hits.2.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.2.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.2.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.3.fields.name.0: "moose.jpg"} + - length: { hits.hits.3.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.3.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { 
hits.hits.3.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + # Rabbit only has one passage vector + - match: {hits.hits.4.fields.name.0: "rabbit.jpg"} + - length: { hits.hits.4.inner_hits.nested.hits.hits: 1 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml index 948a6e04a128b..433592a32f963 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml @@ -249,7 +249,7 @@ setup: id: "1" body: name: cow.jpg - vector: [230.0, 300.33, -34.8988, 15.555, -200.0] + vector: [1, 2, 3, 4, 5] - do: index: @@ -257,7 +257,7 @@ setup: id: "2" body: name: moose.jpg - vector: [-0.5, 10.0, -13, 14.8, 15.0] + vector: [1, 1, 1, 1, 1] - do: index: @@ -265,7 +265,7 @@ setup: id: "3" body: name: rabbit.jpg - vector: [0.5, 111.3, -13.0, 14.8, -156.0] + vector: [1, 2, 2, 2, 2] # We force merge into a single segment to make sure scores are more uniform # Each segment can have a different quantization error, which can affect scores and mip is especially sensitive to this @@ -286,7 +286,7 @@ setup: num_candidates: 3 k: 3 field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [1, 2, 3, 4, 5] - length: {hits.hits: 3} @@ -303,7 +303,7 @@ setup: num_candidates: 3 k: 3 field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [1, 2, 3, 4, 5] filter: { "term": { "name": "moose.jpg" } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index b20f658a01510..cb3eee3c60c23 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.LatchedActionListener; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.GroupedActionListener; @@ -166,7 +165,7 @@ public void testBanOnlyNodesWithOutstandingDescendantTasks() throws Exception { ActionFuture rootTaskFuture = client().execute(TransportTestAction.ACTION, rootRequest); Set pendingRequests = allowPartialRequest(rootRequest); TaskId rootTaskId = getRootTaskId(rootRequest); - ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() + ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() .setTargetTaskId(rootTaskId) .waitForCompletion(true) .execute(); @@ -215,10 +214,10 @@ public void testCancelTaskMultipleTimes() throws Exception { ActionFuture mainTaskFuture = client().execute(TransportTestAction.ACTION, rootRequest); TaskId taskId = getRootTaskId(rootRequest); allowPartialRequest(rootRequest); - CancelTasksResponse resp = clusterAdmin().prepareCancelTasks().setTargetTaskId(taskId).waitForCompletion(false).get(); + ListTasksResponse resp = 
clusterAdmin().prepareCancelTasks().setTargetTaskId(taskId).waitForCompletion(false).get(); assertThat(resp.getTaskFailures(), empty()); assertThat(resp.getNodeFailures(), empty()); - ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() + ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() .setTargetTaskId(taskId) .waitForCompletion(true) .execute(); @@ -226,7 +225,7 @@ public void testCancelTaskMultipleTimes() throws Exception { allowEntireRequest(rootRequest); assertThat(cancelFuture.actionGet().getTaskFailures(), empty()); waitForRootTask(mainTaskFuture, false); - CancelTasksResponse cancelError = clusterAdmin().prepareCancelTasks() + ListTasksResponse cancelError = clusterAdmin().prepareCancelTasks() .setTargetTaskId(taskId) .waitForCompletion(randomBoolean()) .get(); @@ -245,7 +244,7 @@ public void testDoNotWaitForCompletion() throws Exception { allowPartialRequest(rootRequest); } boolean waitForCompletion = randomBoolean(); - ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() + ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() .setTargetTaskId(taskId) .waitForCompletion(waitForCompletion) .execute(); @@ -311,7 +310,7 @@ public void testRemoveBanParentsOnDisconnect() throws Exception { client().execute(TransportTestAction.ACTION, rootRequest); Set pendingRequests = allowPartialRequest(rootRequest); TaskId rootTaskId = getRootTaskId(rootRequest); - ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() + ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() .setTargetTaskId(rootTaskId) .waitForCompletion(true) .execute(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 21497b2e6fcfb..884f6dbcd677e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -503,7 +502,7 @@ public void testTasksCancellation() throws Exception { ); logger.info("--> cancelling the main test task"); - CancelTasksResponse cancelTasksResponse = clusterAdmin().prepareCancelTasks().setActions(TEST_TASK_ACTION.name()).get(); + ListTasksResponse cancelTasksResponse = clusterAdmin().prepareCancelTasks().setActions(TEST_TASK_ACTION.name()).get(); assertEquals(1, cancelTasksResponse.getTasks().size()); expectThrows(TaskCancelledException.class, future); @@ -722,7 +721,7 @@ public void testTasksWaitForAllTask() throws Exception { .map(PersistentTasksCustomMetadata.PersistentTask::getExecutorNode) .collect(Collectors.toSet()); // Spin up a request to wait for all tasks in the cluster to make sure it doesn't cause an infinite loop - ListTasksResponse response = clusterAdmin().prepareListTasks().setWaitForCompletion(true).setTimeout(timeValueSeconds(10)).get(); + ListTasksResponse response = 
clusterAdmin().prepareListTasks().setWaitForCompletion(true).setTimeout(timeValueSeconds(1)).get(); // We expect the nodes that are running always-running-tasks to report FailedNodeException and fail to list their tasks assertThat(response.getNodeFailures().size(), equalTo(nodesRunningTasks.size())); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java index 4d37f75894d56..e0805148a47e3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.cache.clear; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -33,7 +34,7 @@ public void testClearIndicesCacheWithBlocks() { for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { try { enableIndexBlock("test", blockSetting); - ClearIndicesCacheResponse clearIndicesCacheResponse = indicesAdmin().prepareClearCache("test") + BroadcastResponse clearIndicesCacheResponse = indicesAdmin().prepareClearCache("test") .setFieldDataCache(true) .setQueryCache(true) .setFieldDataCache(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java index 69d4f7aaef329..4e2fade87196f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.flush; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -44,7 +45,7 @@ public void testFlushWithBlocks() { )) { try { enableIndexBlock("test", blockSetting); - FlushResponse response = indicesAdmin().prepareFlush("test").get(); + BroadcastResponse response = indicesAdmin().prepareFlush("test").get(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java index a3474afc96c51..b5d8ef0308b91 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.forcemerge; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -50,7 +51,7 @@ public void testForceMergeWithBlocks() { for (String blockSetting : 
Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY_ALLOW_DELETE)) { try { enableIndexBlock("test", blockSetting); - ForceMergeResponse response = indicesAdmin().prepareForceMerge("test").get(); + BaseBroadcastResponse response = indicesAdmin().prepareForceMerge("test").get(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { @@ -70,7 +71,7 @@ public void testForceMergeWithBlocks() { // Merging all indices is blocked when the cluster is read-only try { - ForceMergeResponse response = indicesAdmin().prepareForceMerge().get(); + BaseBroadcastResponse response = indicesAdmin().prepareForceMerge().get(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeIT.java index 229558e9f4242..22bc37b2fb946 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeIT.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.forcemerge; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -51,13 +51,13 @@ public void testForceMergeUUIDConsistent() throws IOException { assertThat(getForceMergeUUID(primary), nullValue()); assertThat(getForceMergeUUID(replica), nullValue()); - final ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge(index).setMaxNumSegments(1).get(); + final BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge(index).setMaxNumSegments(1).get(); assertThat(forceMergeResponse.getFailedShards(), is(0)); assertThat(forceMergeResponse.getSuccessfulShards(), is(2)); // Force flush to force a new commit that contains the force flush UUID - final FlushResponse flushResponse = indicesAdmin().prepareFlush(index).setForce(true).get(); + final BroadcastResponse flushResponse = indicesAdmin().prepareFlush(index).setForce(true).get(); assertThat(flushResponse.getFailedShards(), is(0)); assertThat(flushResponse.getSuccessfulShards(), is(2)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java index 41abfc1219199..2067038e0fdd2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.refresh; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -39,7 +40,7 @@ public void testRefreshWithBlocks() { )) { try { enableIndexBlock("test", blockSetting); - RefreshResponse response = indicesAdmin().prepareRefresh("test").get(); + BroadcastResponse response = 
indicesAdmin().prepareRefresh("test").get(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 48ba897ebb76c..cc930cdad5950 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -166,11 +166,11 @@ public void testClusterInfoServiceCollectsInformation() { assertThat("some shard sizes are populated", shardSizes.values().size(), greaterThan(0)); for (DiskUsage usage : leastUsages.values()) { logger.info("--> usage: {}", usage); - assertThat("usage has be retrieved", usage.getFreeBytes(), greaterThan(0L)); + assertThat("usage has be retrieved", usage.freeBytes(), greaterThan(0L)); } for (DiskUsage usage : mostUsages.values()) { logger.info("--> usage: {}", usage); - assertThat("usage has be retrieved", usage.getFreeBytes(), greaterThan(0L)); + assertThat("usage has be retrieved", usage.freeBytes(), greaterThan(0L)); } for (Long size : shardSizes.values()) { logger.info("--> shard size: {}", size); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index 8843e7ff39bc6..895a60133251f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -12,12 +12,12 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportOpenPointInTimeAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -699,7 +699,7 @@ public void testRefreshFailsIfUnpromotableDisconnects() throws Exception { }); } - RefreshResponse response = indicesAdmin().prepareRefresh(INDEX_NAME).get(); + BroadcastResponse response = indicesAdmin().prepareRefresh(INDEX_NAME).get(); assertThat( "each unpromotable replica shard should be added to the shard failures", response.getFailedShards(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index c044fafe31efc..df23f8b9fd983 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -301,7 +301,7 @@ private void 
refreshDiskUsage() { .getNodeMostAvailableDiskUsages() .values() .stream() - .allMatch(e -> e.getFreeBytes() > WATERMARK_BYTES)) { + .allMatch(e -> e.freeBytes() > WATERMARK_BYTES)) { assertAcked(clusterAdmin().prepareReroute()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java index d3001f485846e..709f6b866ba28 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java @@ -10,17 +10,15 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Strings; import org.elasticsearch.test.ESIntegTestCase; @@ -63,7 +61,7 @@ public void testIndexActions() throws Exception { assertThat(indexResponse.getIndex(), equalTo(getConcreteIndexName())); assertThat(indexResponse.getId(), equalTo("1")); logger.info("Refreshing"); - RefreshResponse refreshResponse = refresh(); + BroadcastResponse refreshResponse = refresh(); assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); logger.info("--> index exists?"); @@ -72,7 +70,7 @@ public void testIndexActions() throws Exception { assertThat(indexExists("test1234565"), equalTo(false)); logger.info("Clearing cache"); - ClearIndicesCacheResponse clearIndicesCacheResponse = indicesAdmin().clearCache( + BroadcastResponse clearIndicesCacheResponse = indicesAdmin().clearCache( new ClearIndicesCacheRequest("test").fieldDataCache(true).queryCache(true) ).actionGet(); assertNoFailures(clearIndicesCacheResponse); @@ -80,7 +78,7 @@ public void testIndexActions() throws Exception { logger.info("Force Merging"); waitForRelocation(ClusterHealthStatus.GREEN); - ForceMergeResponse mergeResponse = forceMerge(); + BaseBroadcastResponse mergeResponse = forceMerge(); assertThat(mergeResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); GetResponse getResult; @@ -130,7 +128,7 @@ public void testIndexActions() throws Exception { client().index(new IndexRequest("test").id("2").source(source("2", "test2"))).actionGet(); logger.info("Flushing"); - FlushResponse flushResult = indicesAdmin().prepareFlush("test").get(); + BroadcastResponse flushResult = indicesAdmin().prepareFlush("test").get(); assertThat(flushResult.getSuccessfulShards(), equalTo(numShards.totalNumShards)); assertThat(flushResult.getFailedShards(), equalTo(0)); logger.info("Refreshing"); @@ -220,7 +218,7 @@ public 
void testBulk() throws Exception { assertThat(bulkResponse.getItems()[5].getIndex(), equalTo(getConcreteIndexName())); waitForRelocation(ClusterHealthStatus.GREEN); - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("test").get(); assertNoFailures(refreshResponse); assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java index d4fe2fcb4d4c1..c9809574002c8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequestBuilder; import org.elasticsearch.action.get.GetResponse; @@ -18,6 +17,7 @@ import org.elasticsearch.action.get.MultiGetRequestBuilder; import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; @@ -641,7 +641,7 @@ public void testGetFieldsComplexField() throws Exception { ensureGreen(); logger.info("flushing"); - FlushResponse flushResponse = indicesAdmin().prepareFlush("my-index").setForce(true).get(); + BroadcastResponse flushResponse = indicesAdmin().prepareFlush("my-index").setForce(true).get(); if (flushResponse.getSuccessfulShards() == 0) { StringBuilder sb = new StringBuilder("failed to flush at least one shard. 
total shards [").append( flushResponse.getTotalShards() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java index 3dd9feff9ce25..1c715beb04356 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.indices; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.BigArrays; @@ -99,7 +99,7 @@ public void testDeletesAloneCanTriggerRefresh() throws Exception { prepareIndex("index").setId(Integer.toString(i)).setSource("field", "value").get(); } // Force merge so we know all merges are done before we start deleting: - ForceMergeResponse r = client().admin().indices().prepareForceMerge().setMaxNumSegments(1).get(); + BaseBroadcastResponse r = client().admin().indices().prepareForceMerge().setMaxNumSegments(1).get(); assertNoFailures(r); final RefreshStats refreshStats = shard.refreshStats(); for (int i = 0; i < 100; i++) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 0b99e3ba3ffcf..62e6cb59994b2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -9,8 +9,8 @@ package org.elasticsearch.indices; import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; @@ -132,7 +132,7 @@ public void testQueryRewrite() throws Exception { assertCacheState(client, "index", 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); + BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); ensureSearchable("index"); @@ -202,7 +202,7 @@ public void testQueryRewriteMissingValues() throws Exception { assertCacheState(client, "index", 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); + BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); ensureSearchable("index"); @@ -269,7 +269,7 @@ public void testQueryRewriteDates() throws Exception { assertCacheState(client, "index", 0, 0); // Force 
merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); + BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); ensureSearchable("index"); @@ -343,7 +343,7 @@ public void testQueryRewriteDatesWithNow() throws Exception { assertCacheState(client, "index-3", 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = client.admin() + BroadcastResponse forceMergeResponse = client.admin() .indices() .prepareForceMerge("index-1", "index-2", "index-3") .setFlush(true) @@ -424,7 +424,7 @@ public void testCanCache() throws Exception { assertCacheState(client, "index", 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); + BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); ensureSearchable("index"); @@ -529,7 +529,7 @@ public void testCacheWithFilteredAlias() { ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); client.prepareIndex("index").setId("1").setRouting("1").setSource("created_at", DateTimeFormatter.ISO_LOCAL_DATE.format(now)).get(); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); + BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java index a328148180107..17b18bf9af1ee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java @@ -9,8 +9,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; @@ -54,9 +54,9 @@ public void testWaitIfOngoing() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(10); final CopyOnWriteArrayList errors = new CopyOnWriteArrayList<>(); for (int j = 0; j < 10; j++) { - indicesAdmin().prepareFlush("test").execute(new ActionListener() { + indicesAdmin().prepareFlush("test").execute(new ActionListener<>() { @Override - public void onResponse(FlushResponse flushResponse) { + public void onResponse(BroadcastResponse flushResponse) { try { // don't use assertAllSuccessful it uses a randomized context that 
belongs to a different thread assertThat( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index e5a8246ba6033..70cd143686dc8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -9,8 +9,8 @@ package org.elasticsearch.indices.mapping; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; @@ -82,7 +82,7 @@ public void testDynamicUpdates() throws Exception { indexRandom(true, false, indexRequests); logger.info("checking all the documents are there"); - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh().get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh().get(); assertThat(refreshResponse.getFailedShards(), equalTo(0)); assertHitCount(prepareSearch("test").setSize(0), recCount); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index 2935efb4808a7..22f987cc855cc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -15,9 +15,9 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Setting; @@ -134,7 +134,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc } logger.info("Start Refresh"); // don't assert on failures here - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get(); final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0; logger.info( "Refresh failed: [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java index a98297e8b49ae..e70c48ce8184e 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; @@ -31,6 +30,7 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -1138,7 +1138,7 @@ public void testFilterCacheStats() throws Exception { }); flush("index"); logger.info("--> force merging to a single segment"); - ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).setMaxNumSegments(1).get(); + BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).setMaxNumSegments(1).get(); assertAllSuccessful(forceMergeResponse); logger.info("--> refreshing"); refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index d47c68690bab8..782aafece4399 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -8,11 +8,11 @@ package org.elasticsearch.recovery; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; @@ -405,7 +405,7 @@ private void logSearchResponse(int numberOfShards, long numberOfDocs, int iterat private void refreshAndAssert() throws Exception { assertBusy(() -> { - RefreshResponse actionGet = indicesAdmin().prepareRefresh().get(); + BroadcastResponse actionGet = indicesAdmin().prepareRefresh().get(); assertAllSuccessful(actionGet); }, 5, TimeUnit.MINUTES); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java index bd69aebcd415e..baa721cbbabd2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java @@ -9,12 +9,11 @@ package org.elasticsearch.recovery; import 
org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentType; @@ -43,12 +42,12 @@ public void testSimpleRecovery() throws Exception { NumShards numShards = getNumShards("test"); client().index(new IndexRequest("test").id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); - FlushResponse flushResponse = indicesAdmin().flush(new FlushRequest("test")).actionGet(); + BroadcastResponse flushResponse = indicesAdmin().flush(new FlushRequest("test")).actionGet(); assertThat(flushResponse.getTotalShards(), equalTo(numShards.totalNumShards)); assertThat(flushResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(flushResponse.getFailedShards(), equalTo(0)); client().index(new IndexRequest("test").id("2").source(source("2", "test"), XContentType.JSON)).actionGet(); - RefreshResponse refreshResponse = indicesAdmin().refresh(new RefreshRequest("test")).actionGet(); + BroadcastResponse refreshResponse = indicesAdmin().refresh(new RefreshRequest("test")).actionGet(); assertThat(refreshResponse.getTotalShards(), equalTo(numShards.totalNumShards)); assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(refreshResponse.getFailedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java index 97a400709cde7..68d00321848eb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java @@ -8,7 +8,7 @@ package org.elasticsearch.search.basic; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.index.query.QueryBuilders; @@ -55,7 +55,7 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) createIndex("test"); } prepareIndex("test").setId(id).setSource("field", "test").get(); - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("test").get(); // at least one shard should be successful when refreshing assertThat(refreshResponse.getSuccessfulShards(), greaterThanOrEqualTo(1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java index 6ebfc61830269..6985ebb17386c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java @@ -14,8 +14,8 @@ import org.apache.lucene.tests.util.English; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -111,7 +111,7 @@ public void testRandomExceptions() throws IOException, InterruptedException, Exe } logger.info("Start Refresh"); // don't assert on failures here - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get(); final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0; logger.info( "Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index 33ef75b317e33..07d976437c24c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -135,7 +135,7 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc ESIntegTestCase.NumShards numShards = getNumShards("test"); logger.info("Start Refresh"); // don't assert on failures here - final RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get(); + final BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get(); final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0; logger.info( "Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java index c4b0346170949..303030a523662 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java @@ -12,10 +12,10 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import 
org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Priority; @@ -50,7 +50,7 @@ public void testFailedSearchWithWrongQuery() throws Exception { for (int i = 0; i < 100; i++) { index(client(), Integer.toString(i), "test", i); } - RefreshResponse refreshResponse = indicesAdmin().refresh(new RefreshRequest("test")).actionGet(); + BroadcastResponse refreshResponse = indicesAdmin().refresh(new RefreshRequest("test")).actionGet(); assertThat(refreshResponse.getTotalShards(), equalTo(test.totalNumShards)); assertThat(refreshResponse.getSuccessfulShards(), equalTo(test.numPrimaries)); assertThat(refreshResponse.getFailedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java index cf8d81f406f91..eedda05dcb102 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java @@ -11,7 +11,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -268,7 +268,7 @@ public void testCancel() throws Exception { final CancelTasksRequest cancelRequest = new CancelTasksRequest().setTargetTaskId(rootTask.taskId()); cancelRequest.setWaitForCompletion(randomBoolean()); - final ActionFuture<CancelTasksResponse> cancelFuture = client().admin().cluster().cancelTasks(cancelRequest); + final ActionFuture<ListTasksResponse> cancelFuture = client().admin().cluster().cancelTasks(cancelRequest); assertBusy(() -> { final Iterable<TransportService> transportServices = cluster("cluster_a").getInstances(TransportService.class); for (TransportService transportService : transportServices) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java index 8f178397f508b..1fe128da6889c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java @@ -112,7 +112,7 @@ public void testConsistentHitsWithSameSeed() throws Exception { CoreMatchers.equalTo(0) ); final int hitCount = response.getHits().getHits().length; - final SearchHit[] currentHits = response.getHits().getHits(); + final SearchHit[] currentHits = response.getHits().asUnpooled().getHits(); ArrayUtil.timSort(currentHits, (o1, o2) -> { // for tie-breaking we have
to resort here since if the score is // identical we rely on collection order which might change. diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java index 3dd9e68cf08af..f830ca9ac0cb6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.test.ESIntegTestCase; @@ -66,8 +67,9 @@ public void testSimpleNested() throws Exception { refresh(); assertResponse( - prepareSearch("test").setKnnSearch(List.of(new KnnSearchBuilder("nested.vector", new float[] { 1, 1, 1 }, 1, 1, null))) - .setAllowPartialSearchResults(false), + prepareSearch("test").setKnnSearch( + List.of(new KnnSearchBuilder("nested.vector", new float[] { 1, 1, 1 }, 1, 1, null).innerHit(new InnerHitBuilder())) + ).setAllowPartialSearchResults(false), response -> assertThat(response.getHits().getHits().length, greaterThan(0)) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 81659323e2471..20c5c11f36756 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -12,13 +12,13 @@ import org.apache.lucene.analysis.TokenStreamToAutomaton; import org.apache.lucene.search.suggest.document.ContextSuggestField; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.FieldMemoryStats; import org.elasticsearch.common.settings.Settings; @@ -1267,7 +1267,7 @@ public void testPrunedSegments() throws IOException { .get(); // we have 2 docs in a segment... prepareIndex(INDEX).setId("2").setSource(jsonBuilder().startObject().field("somefield", "somevalue").endObject()).get(); - ForceMergeResponse actionGet = indicesAdmin().prepareForceMerge().setFlush(true).setMaxNumSegments(1).get(); + BroadcastResponse actionGet = indicesAdmin().prepareForceMerge().setFlush(true).setMaxNumSegments(1).get(); assertAllSuccessful(actionGet); refresh(); // update the first one and then merge.. 
the target segment will have no value in FIELD diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java index b126e4e51128f..df4d52727384f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStats; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -20,6 +19,7 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.settings.Settings; @@ -159,7 +159,7 @@ public void testForceMergeCausesFullSnapshot() throws Exception { clusterAdmin().prepareCreateSnapshot(repo, snapshot1).setIndices(indexName).setWaitForCompletion(true).get(); logger.info("--> force merging down to a single segment"); - final ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge(indexName).setMaxNumSegments(1).setFlush(true).get(); + final BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge(indexName).setMaxNumSegments(1).setFlush(true).get(); assertThat(forceMergeResponse.getFailedShards(), is(0)); final String snapshot2 = "snap-2"; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index ed070c3224aa2..c13891728f315 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -21,11 +21,11 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.RestoreInProgress; @@ -119,7 +119,7 @@ public void testBasicWorkFlow() throws Exception { createIndexWithRandomDocs("test-idx-2", 100); createIndexWithRandomDocs("test-idx-3", 100); - ActionFuture<FlushResponse> flushResponseFuture = null; + ActionFuture<BroadcastResponse>
flushResponseFuture = null; if (randomBoolean()) { ArrayList<String> indicesToFlush = new ArrayList<>(); for (int i = 1; i < 4; i++) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index 841f77ea7efab..704ce8ba990fa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -11,6 +11,11 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.InstrumentType; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -19,12 +24,18 @@ import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; +import java.util.ArrayList; +import java.util.Collection; import java.util.HashSet; +import java.util.List; +import java.util.Map; import java.util.Set; import java.util.regex.Pattern; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.in; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class SimpleThreadPoolIT extends ESIntegTestCase { @@ -33,6 +44,11 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return Settings.builder().build(); } + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(TestTelemetryPlugin.class); + } + public void testThreadNames() throws Exception { ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); Set<String> preNodeStartThreadNames = new HashSet<>(); @@ -95,4 +111,66 @@ public void testThreadNames() throws Exception { } } + public void testThreadPoolMetrics() throws Exception { + internalCluster().startNode(); + + final String dataNodeName = internalCluster().getRandomNodeName(); + final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNodeName) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + + logger.info("do some indexing, flushing, optimize, and searches"); + int numDocs = randomIntBetween(2, 100); + IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; ++i) { + builders[i] = prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field("str_value", "s" + i) + .array("str_values", new String[] { "s" + (i * 2), "s" + (i * 2 + 1) }) + .field("l_value", i) + .array("l_values", new int[] { i * 2, i * 2 + 1 }) + .field("d_value", i) + .array("d_values", new double[] { i * 2, i * 2 + 1 }) + .endObject() + ); + } + indexRandom(true, builders); + int numSearches = randomIntBetween(2, 100); + for (int i = 0; i < numSearches; i++) { + assertNoFailures(prepareSearch("idx").setQuery(QueryBuilders.termQuery("str_value", "s" + i))); + assertNoFailures(prepareSearch("idx").setQuery(QueryBuilders.termQuery("l_value",
i))); + } + final var tp = internalCluster().getInstance(ThreadPool.class, dataNodeName); + ThreadPoolStats tps = tp.stats(); + plugin.collect(); + ArrayList<String> registeredMetrics = plugin.getRegisteredMetrics(InstrumentType.LONG_GAUGE); + registeredMetrics.addAll(plugin.getRegisteredMetrics(InstrumentType.LONG_ASYNC_COUNTER)); + tps.forEach(stats -> { + Map<String, Long> threadPoolMetrics = Map.of( + ThreadPool.THREAD_POOL_METRIC_NAME_COMPLETED, + stats.completed(), + ThreadPool.THREAD_POOL_METRIC_NAME_ACTIVE, + (long) stats.active(), + ThreadPool.THREAD_POOL_METRIC_NAME_CURRENT, + (long) stats.threads(), + ThreadPool.THREAD_POOL_METRIC_NAME_LARGEST, + (long) stats.largest(), + ThreadPool.THREAD_POOL_METRIC_NAME_QUEUE, + (long) stats.queue() + ); + threadPoolMetrics.forEach((suffix, value) -> { + String metricName = ThreadPool.THREAD_POOL_METRIC_PREFIX + stats.name() + suffix; + List<Measurement> measurements; + if (suffix.equals(ThreadPool.THREAD_POOL_METRIC_NAME_COMPLETED)) { + measurements = plugin.getLongAsyncCounterMeasurement(metricName); + } else { + measurements = plugin.getLongGaugeMeasurement(metricName); + } + assertThat(metricName, in(registeredMetrics)); + assertThat(measurements.get(0).getLong(), greaterThanOrEqualTo(value)); + }); + }); + } + } diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 0b8cd149744e3..89082389c5805 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -204,8 +204,7 @@ static URL getElasticsearchCodeSourceLocation() { public static Build readBuild(StreamInput in) throws IOException { final String flavor; - if (in.getTransportVersion().before(TransportVersions.V_8_3_0) - || in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().before(TransportVersions.V_8_3_0) || in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { flavor = in.readString(); } else { flavor = "default"; @@ -235,7 +234,7 @@ public static Build readBuild(StreamInput in) throws IOException { version = versionMatcher.group(1); qualifier = versionMatcher.group(2); } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { minWireVersion = in.readString(); minIndexVersion = in.readString(); displayString = in.readString(); @@ -252,7 +251,7 @@ public static Build readBuild(StreamInput in) throws IOException { public static void writeBuild(Build build, StreamOutput out) throws IOException { if (out.getTransportVersion().before(TransportVersions.V_8_3_0) - || out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + || out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeString(build.flavor()); } out.writeString(build.type().displayName()); @@ -266,7 +265,7 @@ public static void writeBuild(Build build, StreamOutput out) throws IOException out.writeBoolean(build.isSnapshot()); out.writeString(build.qualifiedVersion()); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeString(build.minWireCompatVersion()); out.writeString(build.minIndexCompatVersion()); out.writeString(build.displayString()); diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 50a5f7420847b..237f50befe4bd 100644 ---
a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1838,13 +1838,13 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.http.HttpHeadersValidationException.class, org.elasticsearch.http.HttpHeadersValidationException::new, 169, - TransportVersions.V_8_500_020 + TransportVersions.V_8_9_X ), ROLE_RESTRICTION_EXCEPTION( ElasticsearchRoleRestrictionException.class, ElasticsearchRoleRestrictionException::new, 170, - TransportVersions.V_8_500_020 + TransportVersions.V_8_9_X ), API_NOT_AVAILABLE_EXCEPTION(ApiNotAvailableException.class, ApiNotAvailableException::new, 171, TransportVersions.V_8_500_065), RECOVERY_COMMIT_TOO_NEW_EXCEPTION( diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index fc43d47f29471..a730587f32c20 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -92,8 +92,8 @@ static TransportVersion def(int id) { * READ THE COMMENT BELOW THIS BLOCK OF DECLARATIONS BEFORE ADDING NEW TRANSPORT VERSIONS * Detached transport versions added below here. */ - public static final TransportVersion V_8_500_020 = def(8_500_020); - public static final TransportVersion V_8_500_061 = def(8_500_061); + public static final TransportVersion V_8_9_X = def(8_500_020); + public static final TransportVersion V_8_10_X = def(8_500_061); public static final TransportVersion V_8_500_062 = def(8_500_062); public static final TransportVersion V_8_500_063 = def(8_500_063); public static final TransportVersion V_8_500_064 = def(8_500_064); @@ -170,6 +170,7 @@ static TransportVersion def(int id) { public static final TransportVersion MISSED_INDICES_UPDATE_EXCEPTION_ADDED = def(8_558_00_0); public static final TransportVersion INFERENCE_SERVICE_EMBEDDING_SIZE_ADDED = def(8_559_00_0); public static final TransportVersion ENRICH_ELASTICSEARCH_VERSION_REMOVED = def(8_560_00_0); + public static final TransportVersion DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ_8_12_PATCH = def(8_560_00_1); public static final TransportVersion NODE_STATS_REQUEST_SIMPLIFIED = def(8_561_00_0); public static final TransportVersion TEXT_EXPANSION_TOKEN_PRUNING_CONFIG_ADDED = def(8_562_00_0); public static final TransportVersion ESQL_ASYNC_QUERY = def(8_563_00_0); @@ -184,6 +185,9 @@ static TransportVersion def(int id) { public static final TransportVersion ML_INFERENCE_REQUEST_INPUT_TYPE_ADDED = def(8_572_00_0); public static final TransportVersion ESQL_ENRICH_POLICY_CCQ_MODE = def(8_573_00_0); public static final TransportVersion DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ = def(8_574_00_0); + public static final TransportVersion PEERFINDER_REPORTS_PEERS_MASTERS = def(8_575_00_0); + public static final TransportVersion ESQL_MULTI_CLUSTERS_ENRICH = def(8_576_00_0); + public static final TransportVersion NESTED_KNN_MORE_INNER_HITS = def(8_577_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 126893bc36274..781e3f1398e32 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -160,8 +160,9 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_11_2 = new Version(8_11_02_99); public static final Version V_8_11_3 = new Version(8_11_03_99); public static final Version V_8_11_4 = new Version(8_11_04_99); - public static final Version V_8_11_5 = new Version(8_11_05_99); public static final Version V_8_12_0 = new Version(8_12_00_99); + public static final Version V_8_12_1 = new Version(8_12_01_99); + public static final Version V_8_13_0 = new Version(8_13_00_99); public static final Version CURRENT = V_8_13_0; diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index dab46aed5b4bc..2a9449b35c7b5 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -24,9 +24,11 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.Locale; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.action.index.IndexRequest.MAX_DOCUMENT_ID_LENGTH_IN_BYTES; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; @@ -314,4 +316,19 @@ static ActionRequestValidationException validateSeqNoBasedCASParams( return validationException; } + + static ActionRequestValidationException validateDocIdLength(String id, ActionRequestValidationException validationException) { + if (id != null && id.getBytes(StandardCharsets.UTF_8).length > MAX_DOCUMENT_ID_LENGTH_IN_BYTES) { + validationException = addValidationError( + "id [" + + id + + "] is too long, must be no longer than " + + MAX_DOCUMENT_ID_LENGTH_IN_BYTES + + " bytes but was: " + + id.getBytes(StandardCharsets.UTF_8).length, + validationException + ); + } + return validationException; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index b8d1a431f92e8..cdb9191bd8d70 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -117,7 +117,7 @@ public NodeStats(StreamInput in) throws IOException { ingestStats = in.readOptionalWriteable(IngestStats::read); adaptiveSelectionStats = in.readOptionalWriteable(AdaptiveSelectionStats::new); indexingPressureStats = in.readOptionalWriteable(IndexingPressureStats::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); } else { repositoriesStats = null; @@ -294,7 +294,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(ingestStats); out.writeOptionalWriteable(adaptiveSelectionStats); out.writeOptionalWriteable(indexingPressureStats); - if 
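The validateDocIdLength helper added to DocWriteRequest above measures the id in UTF-8 bytes, not Java chars, so multi-byte characters count more than once. A standalone sketch of the same check, assuming IndexRequest.MAX_DOCUMENT_ID_LENGTH_IN_BYTES is still 512:

import java.nio.charset.StandardCharsets;

// Sketch of the byte-length check; 512 stands in for MAX_DOCUMENT_ID_LENGTH_IN_BYTES.
static String checkDocId(String id) {
    int byteLength = id.getBytes(StandardCharsets.UTF_8).length;
    if (byteLength > 512) {
        return "id [" + id + "] is too long, must be no longer than 512 bytes but was: " + byteLength;
    }
    return null; // valid
}

For example, an id of 257 'é' characters is only 257 chars but 514 UTF-8 bytes, so it fails the check even though its char count is well under the limit.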
(out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(repositoriesStats); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java index 86d0206d62b65..3cba83305c0fa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java @@ -9,16 +9,17 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; /** * ActionType for cancelling running tasks */ -public class CancelTasksAction extends ActionType<CancelTasksResponse> { +public class CancelTasksAction extends ActionType<ListTasksResponse> { public static final CancelTasksAction INSTANCE = new CancelTasksAction(); public static final String NAME = "cluster:admin/tasks/cancel"; private CancelTasksAction() { - super(NAME, CancelTasksResponse::new); + super(NAME, ListTasksResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java index 45fc4e352a4ba..5fdd50e0c9e66 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java @@ -8,13 +8,14 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.tasks.TasksRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; /** * Builder for the request to cancel tasks running on the specified nodes */ -public class CancelTasksRequestBuilder extends TasksRequestBuilder<CancelTasksRequest, CancelTasksResponse, CancelTasksRequestBuilder> { +public class CancelTasksRequestBuilder extends TasksRequestBuilder<CancelTasksRequest, ListTasksResponse, CancelTasksRequestBuilder> { public CancelTasksRequestBuilder(ElasticsearchClient client) { super(client, CancelTasksAction.INSTANCE, new CancelTasksRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java deleted file mode 100644 index a53ed8dacc36c..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1.
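With CancelTasksAction retyped above, callers now receive the generic ListTasksResponse; nothing cancel-specific is lost because the deleted CancelTasksResponse added no fields of its own. A rough caller-side sketch in node-client style (the builder methods are assumed from TasksRequestBuilder; index pattern and logger are illustrative):

// Sketch: cancelling tasks and reading the shared ListTasksResponse.
ListTasksResponse response = client.admin()
    .cluster()
    .prepareCancelTasks()                        // CancelTasksRequestBuilder
    .setActions("indices:data/read/search*")     // cancel matching actions
    .get();
for (TaskInfo task : response.getTasks()) {
    logger.info("requested cancellation of task [{}]", task.taskId());
}
response.getNodeFailures().forEach(e -> logger.warn("cancellation node failure", e));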
- */ - -package org.elasticsearch.action.admin.cluster.node.tasks.cancel; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.List; - -/** - * Returns the list of tasks that were cancelled - */ -public class CancelTasksResponse extends ListTasksResponse { - - private static final ConstructingObjectParser<CancelTasksResponse, Void> PARSER = setupParser( - "cancel_tasks_response", - CancelTasksResponse::new - ); - - public CancelTasksResponse(StreamInput in) throws IOException { - super(in); - } - - public CancelTasksResponse( - List<TaskInfo> tasks, - List<TaskOperationFailure> taskFailures, - List<ElasticsearchException> nodeFailures - ) { - super(tasks, taskFailures, nodeFailures); - } - - public static CancelTasksResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index aa7c19cf35514..1f3271be79797 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.service.ClusterService; @@ -31,7 +32,7 @@ * For a task to be cancellable it has to return an instance of * {@link CancellableTask} from {@link TransportRequest#createTask} */ -public class TransportCancelTasksAction extends TransportTasksAction<CancellableTask, CancelTasksRequest, CancelTasksResponse, TaskInfo> { +public class TransportCancelTasksAction extends TransportTasksAction<CancellableTask, CancelTasksRequest, ListTasksResponse, TaskInfo> { @Inject public TransportCancelTasksAction(ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { @@ -41,7 +42,7 @@ public TransportCancelTasksAction(ClusterService clusterService, TransportServic transportService, actionFilters, CancelTasksRequest::new, - CancelTasksResponse::new, + ListTasksResponse::new, TaskInfo::from, // Cancellation is usually lightweight, and runs on the transport thread if the task didn't even start yet, but some // implementations of CancellableTask#onCancelled() are nontrivial so we use GENERIC here. TODO could it be SAME?
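The comment above is the reason the cancel action stays on the GENERIC pool: onCancelled hooks may do real work. A hypothetical task illustrating that (the scroll-style cleanup is invented for the example; the CancellableTask constructor shape is taken from the server codebase):

import java.util.Map;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.TaskId;

// Hypothetical cancellable task whose onCancelled() is not trivial, which is why
// TransportCancelTasksAction dispatches to GENERIC rather than running inline.
class ExampleScrollTask extends CancellableTask {
    private final Runnable releaseSearchContexts;

    ExampleScrollTask(long id, String type, String action, TaskId parentTaskId,
                      Map<String, String> headers, Runnable releaseSearchContexts) {
        super(id, type, action, "example scroll", parentTaskId, headers);
        this.releaseSearchContexts = releaseSearchContexts;
    }

    @Override
    protected void onCancelled() {
        releaseSearchContexts.run(); // may touch IO or other threads, so not safe on SAME
    }
}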
@@ -50,13 +51,13 @@ public TransportCancelTasksAction(ClusterService clusterService, TransportServic } @Override - protected CancelTasksResponse newResponse( + protected ListTasksResponse newResponse( CancelTasksRequest request, List<TaskInfo> tasks, List<TaskOperationFailure> taskOperationFailures, List<FailedNodeException> failedNodeExceptions ) { - return new CancelTasksResponse(tasks, taskOperationFailures, failedNodeExceptions); + return new ListTasksResponse(tasks, taskOperationFailures, failedNodeExceptions); } protected List<CancellableTask> processTasks(CancelTasksRequest request) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java index 81a26999d2907..9105c20044223 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java @@ -42,7 +42,7 @@ */ public final class AnalysisStats implements ToXContentFragment, Writeable { - private static final TransportVersion SYNONYM_SETS_VERSION = TransportVersions.V_8_500_061; + private static final TransportVersion SYNONYM_SETS_VERSION = TransportVersions.V_8_10_X; private static final Set<String> SYNONYM_FILTER_TYPES = Set.of("synonym", "synonym_graph"); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java index 36fe688b396da..f32cd3f7e0197 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java @@ -15,7 +15,6 @@ import org.elasticsearch.script.ScriptLanguagesInfo; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -37,10 +36,6 @@ public void writeTo(StreamOutput out) throws IOException { info.writeTo(out); } - public static GetScriptLanguageResponse fromXContent(XContentParser parser) throws IOException { - return new GetScriptLanguageResponse(ScriptLanguagesInfo.fromXContent(parser)); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java index 0202a0355abb6..24604a3977096 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java @@ -13,47 +13,19 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.StoredScriptSource; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static
org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - public class GetStoredScriptResponse extends ActionResponse implements ToXContentObject { public static final ParseField _ID_PARSE_FIELD = new ParseField("_id"); public static final ParseField FOUND_PARSE_FIELD = new ParseField("found"); public static final ParseField SCRIPT = new ParseField("script"); - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "GetStoredScriptResponse", - true, - (a, c) -> { - String id = (String) a[0]; - boolean found = (Boolean) a[1]; - StoredScriptSource scriptSource = (StoredScriptSource) a[2]; - return found ? new GetStoredScriptResponse(id, scriptSource) : new GetStoredScriptResponse(id, null); - } - ); - - static { - PARSER.declareField(constructorArg(), (p, c) -> p.text(), _ID_PARSE_FIELD, ObjectParser.ValueType.STRING); - PARSER.declareField(constructorArg(), (p, c) -> p.booleanValue(), FOUND_PARSE_FIELD, ObjectParser.ValueType.BOOLEAN); - PARSER.declareField( - optionalConstructorArg(), - (p, c) -> StoredScriptSource.fromXContent(p, true), - SCRIPT, - ObjectParser.ValueType.OBJECT - ); - } - private String id; private StoredScriptSource source; @@ -103,10 +75,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static GetStoredScriptResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - @Override public void writeTo(StreamOutput out) throws IOException { if (source == null) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java index e2894f072011c..be33fada9c934 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java @@ -25,7 +25,7 @@ public class ReloadAnalyzersRequest extends BroadcastRequest { +public class ClearIndicesCacheAction extends ActionType { public static final ClearIndicesCacheAction INSTANCE = new ClearIndicesCacheAction(); public static final String NAME = "indices:admin/cache/clear"; private ClearIndicesCacheAction() { - super(NAME, ClearIndicesCacheResponse::new); + super(NAME, BroadcastResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java index 464c22d1119b0..fb6139c0ae4e3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java @@ -9,11 +9,12 @@ package org.elasticsearch.action.admin.indices.cache.clear; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.ElasticsearchClient; public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBuilder< ClearIndicesCacheRequest, - ClearIndicesCacheResponse, + BroadcastResponse, ClearIndicesCacheRequestBuilder> { public ClearIndicesCacheRequestBuilder(ElasticsearchClient client) { diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java deleted file mode 100644 index df0a298c87eeb..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.cache.clear; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -/** - * The response of a clear cache action. - */ -public class ClearIndicesCacheResponse extends BroadcastResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "clear_cache", - true, - arg -> { - BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; - return new ClearIndicesCacheResponse( - response.getTotalShards(), - response.getSuccessfulShards(), - response.getFailedShards(), - Arrays.asList(response.getShardFailures()) - ); - } - ); - - static { - declareBroadcastFields(PARSER); - } - - ClearIndicesCacheResponse(StreamInput in) throws IOException { - super(in); - } - - ClearIndicesCacheResponse( - int totalShards, - int successfulShards, - int failedShards, - List shardFailures - ) { - super(totalShards, successfulShards, failedShards, shardFailures); - } - - public static ClearIndicesCacheResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index 86f0093598744..faeaf0bdb575a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -32,7 +33,7 @@ */ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAction< ClearIndicesCacheRequest, - ClearIndicesCacheResponse, + BroadcastResponse, TransportBroadcastByNodeAction.EmptyResult> { private final IndicesService indicesService; @@ -64,11 +65,11 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException { } @Override - 
protected ResponseFactory<ClearIndicesCacheResponse, EmptyResult> getResponseFactory( + protected ResponseFactory<BroadcastResponse, EmptyResult> getResponseFactory( ClearIndicesCacheRequest request, ClusterState clusterState ) { - return (totalShards, successfulShards, failedShards, responses, shardFailures) -> new ClearIndicesCacheResponse( + return (totalShards, successfulShards, failedShards, responses, shardFailures) -> new BroadcastResponse( totalShards, successfulShards, failedShards, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java index 3429457dd7e0f..f0596d061aeb3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java @@ -46,7 +46,7 @@ protected static void declareFields(Constructing private final String index; - protected CreateIndexResponse(StreamInput in) throws IOException { + public CreateIndexResponse(StreamInput in) throws IOException { super(in, true); index = in.readString(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java index 27d96e5feddd5..313fb23c45a6d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java @@ -9,13 +9,14 @@ package org.elasticsearch.action.admin.indices.flush; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; -public class FlushAction extends ActionType<FlushResponse> { +public class FlushAction extends ActionType<BroadcastResponse> { public static final FlushAction INSTANCE = new FlushAction(); public static final String NAME = "indices:admin/flush"; private FlushAction() { - super(NAME, FlushResponse::new); + super(NAME, BroadcastResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java index 64485ad0d4496..fc326f804ce8a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java @@ -23,7 +23,6 @@ * memory heuristics in order to automatically trigger flush operations as required in order to clear memory.
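Since flush, refresh, force-merge and clear-cache are all retyped to the shared BroadcastResponse in this PR, one helper can now handle the shard accounting for any of them. A caller-side sketch (index name and logger are illustrative):

import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;

// Sketch: one helper works for flush, refresh and force-merge after this change.
static void logShardOutcome(String what, BroadcastResponse response) {
    logger.info("{}: {}/{} shards ok, {} failed",
        what, response.getSuccessfulShards(), response.getTotalShards(), response.getFailedShards());
    for (DefaultShardOperationFailedException failure : response.getShardFailures()) {
        logger.warn("{} failed on [{}][{}]", what, failure.index(), failure.shardId());
    }
}

BroadcastResponse flush = client.admin().indices().prepareFlush("my-index").get();
logShardOutcome("flush", flush);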
* * @see org.elasticsearch.client.internal.IndicesAdminClient#flush(FlushRequest) - * @see FlushResponse */ public class FlushRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java index 4e474732e3bad..f23e247428698 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java @@ -9,9 +9,10 @@ package org.elasticsearch.action.admin.indices.flush; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.ElasticsearchClient; -public class FlushRequestBuilder extends BroadcastOperationRequestBuilder { +public class FlushRequestBuilder extends BroadcastOperationRequestBuilder { public FlushRequestBuilder(ElasticsearchClient client) { super(client, FlushAction.INSTANCE, new FlushRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java deleted file mode 100644 index 0a037ebe09f8a..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -/** - * A response to flush action. 
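The deleted FlushResponse below is representative of all four removed broadcast-response subclasses: each carried the same ConstructingObjectParser boilerplate, which existed only so the old high-level REST client could parse the JSON back into the subtype. Condensed, the shared pattern looked like this (ExampleResponse is a stand-in for FlushResponse, RefreshResponse, ForceMergeResponse and ClearIndicesCacheResponse):

import java.util.Arrays;
import java.util.List;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.xcontent.ConstructingObjectParser;

// Condensed form of the boilerplate each deleted response class duplicated: parse the
// shared broadcast fields, then rewrap them in the subtype. With the subtypes gone,
// none of this is needed on the server side.
public class ExampleResponse extends BroadcastResponse {

    private static final ConstructingObjectParser<ExampleResponse, Void> PARSER =
        new ConstructingObjectParser<>("example", true, arg -> {
            BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0];
            return new ExampleResponse(
                response.getTotalShards(),
                response.getSuccessfulShards(),
                response.getFailedShards(),
                Arrays.asList(response.getShardFailures())
            );
        });

    static {
        declareBroadcastFields(PARSER); // inherited from BaseBroadcastResponse
    }

    ExampleResponse(int total, int successful, int failed, List<DefaultShardOperationFailedException> failures) {
        super(total, successful, failed, failures);
    }
}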
- */ -public class FlushResponse extends BroadcastResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("flush", true, arg -> { - BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; - return new FlushResponse( - response.getTotalShards(), - response.getSuccessfulShards(), - response.getFailedShards(), - Arrays.asList(response.getShardFailures()) - ); - }); - - static { - declareBroadcastFields(PARSER); - } - - FlushResponse(StreamInput in) throws IOException { - super(in); - } - - FlushResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { - super(totalShards, successfulShards, failedShards, shardFailures); - } - - public static FlushResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index ade775db9c755..96b4a0191b10c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; import org.elasticsearch.client.internal.node.NodeClient; @@ -28,7 +29,7 @@ */ public class TransportFlushAction extends TransportBroadcastReplicationAction< FlushRequest, - FlushResponse, + BroadcastResponse, ShardFlushRequest, ReplicationResponse> { @@ -59,12 +60,12 @@ protected ShardFlushRequest newShardRequest(FlushRequest request, ShardId shardI } @Override - protected FlushResponse newResponse( + protected BroadcastResponse newResponse( int successfulShards, int failedShards, int totalNumCopies, List shardFailures ) { - return new FlushResponse(totalNumCopies, successfulShards, failedShards, shardFailures); + return new BroadcastResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java index 3ab30298a57f5..1270365cded0d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java @@ -9,13 +9,14 @@ package org.elasticsearch.action.admin.indices.forcemerge; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; -public class ForceMergeAction extends ActionType { +public class ForceMergeAction extends ActionType { public static final ForceMergeAction INSTANCE = new ForceMergeAction(); public static final String NAME = "indices:admin/forcemerge"; private ForceMergeAction() { - super(NAME, ForceMergeResponse::new); + super(NAME, BroadcastResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java 
index 241f1a0c7fbf6..37075dd896b80 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -30,7 +30,6 @@ * to execute, and if so, executes it * * @see org.elasticsearch.client.internal.IndicesAdminClient#forceMerge(ForceMergeRequest) - * @see ForceMergeResponse */ public class ForceMergeRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java index 835749751f4a6..d4c15ee799670 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.forcemerge; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.ElasticsearchClient; /** @@ -20,7 +21,7 @@ */ public class ForceMergeRequestBuilder extends BroadcastOperationRequestBuilder< ForceMergeRequest, - ForceMergeResponse, + BroadcastResponse, ForceMergeRequestBuilder> { public ForceMergeRequestBuilder(ElasticsearchClient client) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java deleted file mode 100644 index 3853a944e8676..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.forcemerge; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -/** - * A response for force merge action. 
- */ -public class ForceMergeResponse extends BroadcastResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "force_merge", - true, - arg -> { - BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; - return new ForceMergeResponse( - response.getTotalShards(), - response.getSuccessfulShards(), - response.getFailedShards(), - Arrays.asList(response.getShardFailures()) - ); - } - ); - - static { - declareBroadcastFields(PARSER); - } - - ForceMergeResponse(StreamInput in) throws IOException { - super(in); - } - - public ForceMergeResponse( - int totalShards, - int successfulShards, - int failedShards, - List shardFailures - ) { - super(totalShards, successfulShards, failedShards, shardFailures); - } - - public static ForceMergeResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index a70498695e149..df98e8f12f18e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -35,7 +36,7 @@ */ public class TransportForceMergeAction extends TransportBroadcastByNodeAction< ForceMergeRequest, - ForceMergeResponse, + BroadcastResponse, TransportBroadcastByNodeAction.EmptyResult> { private final IndicesService indicesService; @@ -68,8 +69,8 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException { } @Override - protected ResponseFactory getResponseFactory(ForceMergeRequest request, ClusterState clusterState) { - return (totalShards, successfulShards, failedShards, responses, shardFailures) -> new ForceMergeResponse( + protected ResponseFactory getResponseFactory(ForceMergeRequest request, ClusterState clusterState) { + return (totalShards, successfulShards, failedShards, responses, shardFailures) -> new BroadcastResponse( totalShards, successfulShards, failedShards, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java index 7d9ca67b9fa9e..f094ff75d9c41 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java @@ -9,13 +9,14 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; -public class RefreshAction extends ActionType { +public class RefreshAction extends ActionType { public static final RefreshAction INSTANCE = new RefreshAction(); public static final String NAME = "indices:admin/refresh"; private RefreshAction() { - super(NAME, RefreshResponse::new); + super(NAME, BroadcastResponse::new); } 
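With RefreshAction retyped above, a typed execute() call now compiles directly against the shared response class. A sketch of the end-to-end typing (index name and logger are illustrative):

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;

// Sketch: the action's response type parameter is now BroadcastResponse.
client.execute(RefreshAction.INSTANCE, new RefreshRequest("my-index"), new ActionListener<BroadcastResponse>() {
    @Override
    public void onResponse(BroadcastResponse response) {
        logger.info("refreshed {}/{} shards", response.getSuccessfulShards(), response.getTotalShards());
    }

    @Override
    public void onFailure(Exception e) {
        logger.warn("refresh failed", e);
    }
});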
} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java index d0f9e99fd08ec..1f703e59980d6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java @@ -19,7 +19,6 @@ * default a refresh is scheduled periodically. * * @see org.elasticsearch.client.internal.IndicesAdminClient#refresh(RefreshRequest) - * @see RefreshResponse */ public class RefreshRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java index 51d569dac0c30..c503ff6ca6930 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.ElasticsearchClient; /** @@ -16,7 +17,7 @@ * capabilities depends on the index engine used. For example, the internal one requires refresh to be called, but by * default a refresh is scheduled periodically. */ -public class RefreshRequestBuilder extends BroadcastOperationRequestBuilder { +public class RefreshRequestBuilder extends BroadcastOperationRequestBuilder { public RefreshRequestBuilder(ElasticsearchClient client) { super(client, RefreshAction.INSTANCE, new RefreshRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java deleted file mode 100644 index 5669591a17dc7..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.refresh; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -/** - * The response of a refresh action. 
- */ -public class RefreshResponse extends BroadcastResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("refresh", true, arg -> { - BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; - return new RefreshResponse( - response.getTotalShards(), - response.getSuccessfulShards(), - response.getFailedShards(), - Arrays.asList(response.getShardFailures()) - ); - }); - - static { - declareBroadcastFields(PARSER); - } - - RefreshResponse(StreamInput in) throws IOException { - super(in); - } - - public RefreshResponse( - int totalShards, - int successfulShards, - int failedShards, - List shardFailures - ) { - super(totalShards, successfulShards, failedShards, shardFailures); - } - - public static RefreshResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index 7537e74e2c780..5d6f60216ae05 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; @@ -29,7 +30,7 @@ */ public class TransportRefreshAction extends TransportBroadcastReplicationAction< RefreshRequest, - RefreshResponse, + BroadcastResponse, BasicReplicationRequest, ReplicationResponse> { @@ -62,12 +63,12 @@ protected BasicReplicationRequest newShardRequest(RefreshRequest request, ShardI } @Override - protected RefreshResponse newResponse( + protected BroadcastResponse newResponse( int successfulShards, int failedShards, int totalNumCopies, List shardFailures ) { - return new RefreshResponse(totalNumCopies, successfulShards, failedShards, shardFailures); + return new BroadcastResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java index dc26e0380fe72..aa838e473bd29 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java @@ -9,14 +9,15 @@ package org.elasticsearch.action.admin.indices.shrink; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -public class ResizeAction extends ActionType { +public class ResizeAction extends ActionType { public static final ResizeAction INSTANCE = new ResizeAction(); public static final String NAME = "indices:admin/resize"; private ResizeAction() { - super(NAME, ResizeResponse::new); + super(NAME, CreateIndexResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index 71270cd61b9ed..c39d2e1114618 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -152,7 +153,7 @@ public String getSourceIndex() { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link ResizeResponse#isShardsAcknowledged()} to + * to be active before returning. Check {@link CreateIndexResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. * * @param waitForActiveShards number of active shard copies to wait on diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java index a4972d1a98e7d..a18de15037e49 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -8,13 +8,14 @@ package org.elasticsearch.action.admin.indices.shrink; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -public class ResizeRequestBuilder extends AcknowledgedRequestBuilder { +public class ResizeRequestBuilder extends AcknowledgedRequestBuilder { public ResizeRequestBuilder(ElasticsearchClient client) { super(client, ResizeAction.INSTANCE, new ResizeRequest()); } @@ -43,7 +44,7 @@ public ResizeRequestBuilder setSettings(Settings settings) { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link ResizeResponse#isShardsAcknowledged()} to + * to be active before returning. Check {@link CreateIndexResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. 
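With ResizeResponse gone, shrink and split callers read the acknowledgement flags off CreateIndexResponse directly, as the updated javadoc above says. A sketch of the wait-for-shards pattern (index names are examples; the request constructor takes target then source):

import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.shrink.ResizeAction;
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.action.support.ActiveShardCount;

// Sketch: distinguish "acknowledged" (index created) from "shardsAcknowledged"
// (requested shard copies started before the timeout).
ResizeRequest request = new ResizeRequest("target-index", "source-index");
request.setWaitForActiveShards(ActiveShardCount.ONE);
CreateIndexResponse response = client.admin().indices().execute(ResizeAction.INSTANCE, request).actionGet();
if (response.isAcknowledged() == false) {
    logger.warn("resize of [source-index] not acknowledged");
} else if (response.isShardsAcknowledged() == false) {
    logger.warn("[target-index] created but shard copies did not start in time");
}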
* * @param waitForActiveShards number of active shard copies to wait on diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java deleted file mode 100644 index 768fc18397519..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.shrink; - -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; - -/** - * A response for a resize index action, either shrink or split index. - */ -public final class ResizeResponse extends CreateIndexResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "resize_index", - true, - args -> new ResizeResponse((boolean) args[0], (boolean) args[1], (String) args[2]) - ); - - static { - declareFields(PARSER); - } - - ResizeResponse(StreamInput in) throws IOException { - super(in); - } - - public ResizeResponse(boolean acknowledged, boolean shardsAcknowledged, String index) { - super(acknowledged, shardsAcknowledged, index); - } - - public static ResizeResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java index 8ce69309cf59d..7df58990b69ed 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java @@ -9,14 +9,15 @@ package org.elasticsearch.action.admin.indices.shrink; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -public class ShrinkAction extends ActionType { +public class ShrinkAction extends ActionType { public static final ShrinkAction INSTANCE = new ShrinkAction(); public static final String NAME = "indices:admin/shrink"; private ShrinkAction() { - super(NAME, ResizeResponse::new); + super(NAME, CreateIndexResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 5686deb6b804a..fbae64dcb6d45 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import 
org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; @@ -41,7 +42,7 @@ /** * Main class to initiate resizing (shrink / split) an index into a new index */ -public class TransportResizeAction extends TransportMasterNodeAction { +public class TransportResizeAction extends TransportMasterNodeAction { private final MetadataCreateIndexService createIndexService; private final Client client; @@ -86,7 +87,7 @@ protected TransportResizeAction( actionFilters, ResizeRequest::new, indexNameExpressionResolver, - ResizeResponse::new, + CreateIndexResponse::new, EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.createIndexService = createIndexService; @@ -103,7 +104,7 @@ protected void masterOperation( Task task, final ResizeRequest resizeRequest, final ClusterState state, - final ActionListener listener + final ActionListener listener ) { // there is no need to fetch docs stats for split but we keep it simple and do it anyway for simplicity of the code @@ -136,7 +137,11 @@ protected void masterOperation( createIndexService.createIndex( updateRequest, delegatedListener.map( - response -> new ResizeResponse(response.isAcknowledged(), response.isShardsAcknowledged(), updateRequest.index()) + response -> new CreateIndexResponse( + response.isAcknowledged(), + response.isShardsAcknowledged(), + updateRequest.index() + ) ) ); }) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index d0da715b17168..b6345ed0fce4a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -48,7 +48,7 @@ public class CommonStats implements Writeable, ToXContentFragment { private static final TransportVersion VERSION_SUPPORTING_NODE_MAPPINGS = TransportVersions.V_8_5_0; - private static final TransportVersion VERSION_SUPPORTING_DENSE_VECTOR_STATS = TransportVersions.V_8_500_061; + private static final TransportVersion VERSION_SUPPORTING_DENSE_VECTOR_STATS = TransportVersions.V_8_10_X; @Nullable public DocsStats docs; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java deleted file mode 100644 index 1c3f9672f712c..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
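The masterOperation change above keeps the listener plumbing intact by mapping the internal create-index acknowledgement onto the public response with ActionListener.map. The same adapter pattern in isolation, lifted almost verbatim from the hunk above (method name is invented; createIndexService and updateRequest are as in TransportResizeAction):

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;

// Sketch of the listener.map adapter: translate an internal response type into the
// action's public response without defining an extra listener class.
void createAndReport(CreateIndexClusterStateUpdateRequest updateRequest,
                     ActionListener<CreateIndexResponse> listener) {
    createIndexService.createIndex(
        updateRequest,
        listener.map(
            response -> new CreateIndexResponse(
                response.isAcknowledged(),
                response.isShardsAcknowledged(),
                updateRequest.index()
            )
        )
    );
}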
- */ - -package org.elasticsearch.action.admin.indices.stats; - -import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.tasks.CancellableTask; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskId; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Map; - -public class FieldUsageShardRequest extends BroadcastShardRequest { - - private final String[] fields; - - FieldUsageShardRequest(ShardId shardId, FieldUsageStatsRequest request) { - super(shardId, request); - this.fields = request.fields(); - } - - FieldUsageShardRequest(StreamInput in) throws IOException { - super(in); - this.fields = in.readStringArray(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(fields); - } - - @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new CancellableTask(id, type, action, "", parentTaskId, headers) { - @Override - public String getDescription() { - return FieldUsageShardRequest.this.getDescription(); - } - }; - } - - @Override - public String getDescription() { - return "get field usage for shard: [" + shardId() + "], fields: " + Arrays.toString(fields); - } - - public String[] fields() { - return fields; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java index f90dc894f1b57..477a0bd910719 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java @@ -61,7 +61,7 @@ public ShardStats(StreamInput in) throws IOException { isCustomDataPath = in.readBoolean(); seqNoStats = in.readOptionalWriteable(SeqNoStats::new); retentionLeaseStats = in.readOptionalWriteable(RetentionLeaseStats::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { isSearchIdle = in.readBoolean(); searchIdleTime = in.readVLong(); } else { @@ -215,7 +215,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isCustomDataPath); out.writeOptionalWriteable(seqNoStats); out.writeOptionalWriteable(retentionLeaseStats); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeBoolean(isSearchIdle); out.writeVLong(searchIdleTime); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index ae73904a8447b..1e0a36cfc1a99 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -57,7 +57,7 @@ public Request(String name) { public Request(StreamInput in) throws IOException { super(in); name = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if 
(in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { includeDefaults = in.readBoolean(); } else { includeDefaults = false; @@ -68,7 +68,7 @@ public Request(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(name); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeBoolean(includeDefaults); } } @@ -121,7 +121,7 @@ public static class Response extends ActionResponse implements ToXContentObject public Response(StreamInput in) throws IOException { super(in); componentTemplates = in.readMap(ComponentTemplate::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); } else { rolloverConfiguration = null; @@ -149,7 +149,7 @@ public RolloverConfiguration getRolloverConfiguration() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(componentTemplates, StreamOutput::writeWriteable); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index f2c041c2c71bc..8401a510a1482 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -58,7 +58,7 @@ public Request(@Nullable String name) { public Request(StreamInput in) throws IOException { super(in); name = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { includeDefaults = in.readBoolean(); } else { includeDefaults = false; @@ -69,7 +69,7 @@ public Request(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(name); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeBoolean(includeDefaults); } } @@ -123,7 +123,7 @@ public static class Response extends ActionResponse implements ToXContentObject public Response(StreamInput in) throws IOException { super(in); indexTemplates = in.readMap(ComposableIndexTemplate::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); } else { rolloverConfiguration = null; @@ -147,7 +147,7 @@ public Map indexTemplates() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(indexTemplates, StreamOutput::writeWriteable); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } } diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java index 6b71be3925478..9281c6d3dd0bc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java @@ -40,7 +40,7 @@ public SimulateIndexTemplateRequest(StreamInput in) throws IOException { super(in); indexName = in.readString(); indexTemplateRequest = in.readOptionalWriteable(TransportPutComposableIndexTemplateAction.Request::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { includeDefaults = in.readBoolean(); } } @@ -50,7 +50,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(indexName); out.writeOptionalWriteable(indexTemplateRequest); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeBoolean(includeDefaults); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index b7cc8564be062..106f1a7e4f393 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -73,7 +73,7 @@ public SimulateIndexTemplateResponse(StreamInput in) throws IOException { } else { this.overlappingTemplates = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); } } @@ -91,7 +91,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java index 7f637527a6a1f..a1148695ba6d6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java @@ -63,7 +63,7 @@ public Request(StreamInput in) throws IOException { super(in); templateName = in.readOptionalString(); indexTemplateRequest = in.readOptionalWriteable(TransportPutComposableIndexTemplateAction.Request::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { includeDefaults = in.readBoolean(); } } @@ -73,7 +73,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(templateName); out.writeOptionalWriteable(indexTemplateRequest); - if 
(out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeBoolean(includeDefaults); } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 073ac021f787a..f591cc22d19a8 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -75,7 +75,7 @@ public Request(StreamInput in) throws IOException { super(in); this.names = in.readOptionalStringArray(); this.indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { this.includeDefaults = in.readBoolean(); } else { this.includeDefaults = false; @@ -87,7 +87,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalStringArray(names); indicesOptions.writeIndicesOptions(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeBoolean(includeDefaults); } } @@ -481,9 +481,7 @@ public Response(List dataStreams, @Nullable RolloverConfiguratio public Response(StreamInput in) throws IOException { this( in.readCollectionAsList(DataStreamInfo::new), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) - ? in.readOptionalWriteable(RolloverConfiguration::new) - : null + in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(RolloverConfiguration::new) : null ); } @@ -499,7 +497,7 @@ public RolloverConfiguration getRolloverConfiguration() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeCollection(dataStreams); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } } diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java index 61c979f9494b5..5875ab5089d92 100644 --- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java @@ -62,7 +62,7 @@ public Request(StreamInput in) throws IOException { super(in); sourceIndex = in.readString(); targetIndex = in.readString(); - waitTimeout = in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) + waitTimeout = in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X) ? TimeValue.parseTimeValue(in.readString(), "timeout") : DEFAULT_WAIT_TIMEOUT; downsampleConfig = new DownsampleConfig(in); @@ -89,7 +89,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(sourceIndex); out.writeString(targetIndex); out.writeString( - out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) + out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X) ? 
waitTimeout.getStringRep() : DEFAULT_WAIT_TIMEOUT.getStringRep() ); diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index e91329e810397..4f548e227dcfb 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -15,30 +15,20 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.rest.action.document.RestMultiGetAction; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParser.Token; import java.io.IOException; -import java.util.ArrayList; import java.util.Iterator; -import java.util.List; public class MultiGetResponse extends ActionResponse implements Iterable, ToXContentObject { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MultiGetResponse.class); - private static final ParseField INDEX = new ParseField("_index"); - private static final ParseField TYPE = new ParseField("_type"); - private static final ParseField ID = new ParseField("_id"); - private static final ParseField ERROR = new ParseField("error"); - private static final ParseField DOCS = new ParseField("docs"); + static final ParseField INDEX = new ParseField("_index"); + static final ParseField ID = new ParseField("_id"); + static final ParseField DOCS = new ParseField("docs"); /** * Represents a failure. @@ -151,80 +141,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static MultiGetResponse fromXContent(XContentParser parser) throws IOException { - String currentFieldName = null; - List items = new ArrayList<>(); - for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { - switch (token) { - case FIELD_NAME: - currentFieldName = parser.currentName(); - break; - case START_ARRAY: - if (DOCS.getPreferredName().equals(currentFieldName)) { - for (token = parser.nextToken(); token != Token.END_ARRAY; token = parser.nextToken()) { - if (token == Token.START_OBJECT) { - items.add(parseItem(parser)); - } - } - } - break; - default: - // If unknown tokens are encounter then these should be ignored, because - // this is parsing logic on the client side. 
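The hunks above all migrate the same wire-compatibility idiom from the interim V_8_500_* constants to the release-mapped V_8_9_X / V_8_10_X ones: a field added to the protocol is written only when the peer's transport version is new enough, and the reader mirrors that check exactly, falling back to a default. A minimal self-contained sketch of the idiom, with DataInputStream/DataOutputStream and a WIRE_V2 constant standing in for StreamInput/StreamOutput and TransportVersions (illustrative names, not the real API):

```java
// Toy sketch of the version-gated wire pattern used throughout these hunks.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class VersionGatedWireDemo {
    static final int WIRE_V1 = 1;
    static final int WIRE_V2 = 2; // hypothetical version that introduced `includeDefaults`

    record Request(boolean includeDefaults) {
        // Write side: only emit the new field when the peer can read it.
        void writeTo(DataOutputStream out, int peerVersion) throws IOException {
            if (peerVersion >= WIRE_V2) {
                out.writeBoolean(includeDefaults);
            }
        }

        // Read side: mirror the write side exactly, with a default for old peers.
        static Request readFrom(DataInputStream in, int peerVersion) throws IOException {
            boolean includeDefaults = peerVersion >= WIRE_V2 && in.readBoolean();
            return new Request(includeDefaults);
        }
    }

    public static void main(String[] args) throws IOException {
        for (int peerVersion : new int[] { WIRE_V1, WIRE_V2 }) {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            new Request(true).writeTo(new DataOutputStream(bytes), peerVersion);
            Request decoded = Request.readFrom(
                new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())),
                peerVersion
            );
            System.out.println("peer v" + peerVersion + " -> includeDefaults=" + decoded.includeDefaults());
        }
    }
}
```

The short-circuit `peerVersion >= WIRE_V2 && in.readBoolean()` is the same shape as the `onOrAfter(...) && in.readBoolean()` read in the BroadcastUnpromotableRequest hunk further down.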
- break; - } - } - return new MultiGetResponse(items.toArray(new MultiGetItemResponse[0])); - } - - private static MultiGetItemResponse parseItem(XContentParser parser) throws IOException { - String currentFieldName = null; - String index = null; - String id = null; - ElasticsearchException exception = null; - GetResult getResult = null; - for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { - switch (token) { - case FIELD_NAME: - currentFieldName = parser.currentName(); - if (INDEX.match(currentFieldName, parser.getDeprecationHandler()) == false - && ID.match(currentFieldName, parser.getDeprecationHandler()) == false - && ERROR.match(currentFieldName, parser.getDeprecationHandler()) == false) { - getResult = GetResult.fromXContentEmbedded(parser, index, id); - } - break; - case VALUE_STRING: - if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { - index = parser.text(); - } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - deprecationLogger.compatibleCritical("mget_with_types", RestMultiGetAction.TYPES_DEPRECATION_MESSAGE); - } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { - id = parser.text(); - } - break; - case START_OBJECT: - if (ERROR.match(currentFieldName, parser.getDeprecationHandler())) { - exception = ElasticsearchException.fromXContent(parser); - } - break; - default: - // If unknown tokens are encounter then these should be ignored, because - // this is parsing logic on the client side. - break; - } - if (getResult != null) { - break; - } - } - - if (exception != null) { - return new MultiGetItemResponse(null, new Failure(index, id, exception)); - } else { - GetResponse getResponse = new GetResponse(getResult); - return new MultiGetItemResponse(getResponse, null); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(responses); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 12f7c21cba8e1..285346adcd13f 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -43,7 +43,6 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -73,7 +72,7 @@ public class IndexRequest extends ReplicatedWriteRequest implements DocWriteRequest, CompositeIndicesRequest { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(IndexRequest.class); - private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_500_061; + private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_10_X; /** * Max length of the source document to include into string() @@ -266,17 +265,7 @@ public ActionRequestValidationException validate() { validationException = DocWriteRequest.validateSeqNoBasedCASParams(this, validationException); - if (id != null && id.getBytes(StandardCharsets.UTF_8).length > MAX_DOCUMENT_ID_LENGTH_IN_BYTES) { - validationException = addValidationError( - "id [" - + id - + "] is too long, must be no longer than " - + MAX_DOCUMENT_ID_LENGTH_IN_BYTES - + " bytes but was: " - + id.getBytes(StandardCharsets.UTF_8).length, - validationException - ); - } + validationException = 
DocWriteRequest.validateDocIdLength(id, validationException); if (pipeline != null && pipeline.isEmpty()) { validationException = addValidationError("pipeline cannot be an empty string", validationException); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java index f5dcb83fa36fc..396a5b63b3cd5 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java @@ -8,80 +8,22 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParser.Token; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - public class SimulatePipelineResponse extends ActionResponse implements ToXContentObject { private String pipelineId; private boolean verbose; private List results; - @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "simulate_pipeline_response", - true, - a -> { - List results = (List) a[0]; - boolean verbose = false; - if (results.size() > 0) { - if (results.get(0) instanceof SimulateDocumentVerboseResult) { - verbose = true; - } - } - return new SimulatePipelineResponse(null, verbose, results); - } - ); - static { - PARSER.declareObjectArray(constructorArg(), (parser, context) -> { - Token token = parser.currentToken(); - ensureExpectedToken(Token.START_OBJECT, token, parser); - SimulateDocumentResult result = null; - while ((token = parser.nextToken()) != Token.END_OBJECT) { - ensureExpectedToken(Token.FIELD_NAME, token, parser); - String fieldName = parser.currentName(); - token = parser.nextToken(); - if (token == Token.START_ARRAY) { - if (fieldName.equals(SimulateDocumentVerboseResult.PROCESSOR_RESULT_FIELD)) { - List results = new ArrayList<>(); - while ((token = parser.nextToken()) == Token.START_OBJECT) { - results.add(SimulateProcessorResult.fromXContent(parser)); - } - ensureExpectedToken(Token.END_ARRAY, token, parser); - result = new SimulateDocumentVerboseResult(results); - } else { - parser.skipChildren(); - } - } else if (token.equals(Token.START_OBJECT)) { - switch (fieldName) { - case WriteableIngestDocument.DOC_FIELD -> result = new SimulateDocumentBaseResult( - WriteableIngestDocument.INGEST_DOC_PARSER.apply(parser, null).getIngestDocument() - ); - case "error" -> result = new SimulateDocumentBaseResult(ElasticsearchException.fromXContent(parser)); - default -> parser.skipChildren(); - } - } // else it is a value skip it - } - assert result != null; - return result; - }, new ParseField(Fields.DOCUMENTS)); - } - public SimulatePipelineResponse(StreamInput in) throws IOException { super(in); this.pipelineId = in.readOptionalString(); @@ -136,10 
+78,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static SimulatePipelineResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - static final class Fields { static final String DOCUMENTS = "docs"; } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 591b9a86cda20..1da114adb34f6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -172,7 +172,7 @@ abstract class AbstractSearchAsyncAction exten this.results = resultConsumer; // register the release of the query consumer to free up the circuit breaker memory // at the end of the search - addReleasable(resultConsumer::decRef); + addReleasable(resultConsumer); this.clusters = clusters; } diff --git a/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java b/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java index b4fd0107f731f..96f10d7d8a30e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java +++ b/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java @@ -9,11 +9,11 @@ package org.elasticsearch.action.search; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.core.AbstractRefCounted; -import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.transport.LeakTracker; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Stream; /** @@ -22,7 +22,13 @@ class ArraySearchPhaseResults extends SearchPhaseResults { final AtomicArray results; - private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(this::doClose)); + private final AtomicBoolean closed = new AtomicBoolean(false); + + private final Releasable releasable = LeakTracker.wrap(() -> { + for (Result result : getAtomicArray().asList()) { + result.decRef(); + } + }); ArraySearchPhaseResults(int size) { super(size); @@ -41,12 +47,16 @@ void consumeResult(Result result, Runnable next) { next.run(); } - protected void doClose() { - for (Result result : getAtomicArray().asList()) { - result.decRef(); + @Override + public final void close() { + if (closed.compareAndSet(false, true)) { + releasable.close(); + doClose(); } } + protected void doClose() {} + boolean hasResult(int shardIndex) { return results.get(shardIndex) != null; } @@ -55,24 +65,4 @@ boolean hasResult(int shardIndex) { AtomicArray getAtomicArray() { return results; } - - @Override - public void incRef() { - refCounted.incRef(); - } - - @Override - public boolean tryIncRef() { - return refCounted.tryIncRef(); - } - - @Override - public boolean decRef() { - return refCounted.decRef(); - } - - @Override - public boolean hasReferences() { - return refCounted.hasReferences(); - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index 9900ee9d824ae..52f41179795d6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ 
b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -482,24 +482,7 @@ Stream getSuccessfulResults() { } @Override - public void incRef() { - - } - - @Override - public boolean tryIncRef() { - return false; - } - - @Override - public boolean decRef() { - return false; - } - - @Override - public boolean hasReferences() { - return false; - } + public void close() {} } private GroupShardsIterator getIterator( diff --git a/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java index 13972ea2bf64a..2c4cb31584323 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java @@ -104,20 +104,5 @@ AtomicArray getAtomicArray() { } @Override - public void incRef() {} - - @Override - public boolean tryIncRef() { - return true; - } - - @Override - public boolean decRef() { - return true; - } - - @Override - public boolean hasReferences() { - return false; - } + public void close() {} } diff --git a/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java b/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java index 3a12b72570caf..0e6830dcfab0e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java +++ b/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java @@ -25,7 +25,6 @@ final class CountedCollector { CountedCollector(SearchPhaseResults resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) { this.resultConsumer = resultConsumer; - resultConsumer.incRef(); this.counter = new CountDown(expectedOps); this.onFinish = onFinish; this.context = context; @@ -38,11 +37,7 @@ final class CountedCollector { void countDown() { assert counter.isCountedDown() == false : "more operations executed than specified"; if (counter.countDown()) { - try { - onFinish.run(); - } finally { - resultConsumer.decRef(); - } + onFinish.run(); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index 54408cd560314..0c9d6ba12a27a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -66,7 +66,7 @@ final class DfsQueryPhase extends SearchPhase { // register the release of the query consumer to free up the circuit breaker memory // at the end of the search - context.addReleasable(queryResult::decRef); + context.addReleasable(queryResult); } @Override @@ -151,7 +151,11 @@ ShardSearchRequest rewriteShardSearchRequest(ShardSearchRequest request) { } scoreDocs.sort(Comparator.comparingInt(scoreDoc -> scoreDoc.doc)); String nestedPath = dfsKnnResults.getNestedPath(); - QueryBuilder query = new KnnScoreDocQueryBuilder(scoreDocs.toArray(new ScoreDoc[0])); + QueryBuilder query = new KnnScoreDocQueryBuilder( + scoreDocs.toArray(new ScoreDoc[0]), + source.knnSearch().get(i).getField(), + source.knnSearch().get(i).getQueryVector() + ).boost(source.knnSearch().get(i).boost()); if (nestedPath != null) { query = new NestedQueryBuilder(nestedPath, query, ScoreMode.Max).innerHit(source.knnSearch().get(i).innerHit()); } diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java 
b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 00e2b41fde3da..7741c1483f69a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -96,6 +96,7 @@ public void run() { hit.setInnerHits(Maps.newMapWithExpectedSize(innerHitBuilders.size())); } hit.getInnerHits().put(innerHitBuilder.getName(), innerHits); + innerHits.mustIncRef(); } } onPhaseDone(); diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 11528f8e1521f..1f06158951392 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -38,11 +38,16 @@ final class FetchSearchPhase extends SearchPhase { private final AggregatedDfs aggregatedDfs; FetchSearchPhase(SearchPhaseResults resultConsumer, AggregatedDfs aggregatedDfs, SearchPhaseContext context) { - this(resultConsumer, aggregatedDfs, context, (response, queryPhaseResults) -> { - response.mustIncRef(); - context.addReleasable(response::decRef); - return new ExpandSearchPhase(context, response.hits, () -> new FetchLookupFieldsPhase(context, response, queryPhaseResults)); - }); + this( + resultConsumer, + aggregatedDfs, + context, + (response, queryPhaseResults) -> new ExpandSearchPhase( + context, + response.hits, + () -> new FetchLookupFieldsPhase(context, response, queryPhaseResults) + ) + ); } FetchSearchPhase( @@ -61,7 +66,7 @@ final class FetchSearchPhase extends SearchPhase { ); } this.fetchResults = new ArraySearchPhaseResults<>(resultConsumer.getNumShards()); - context.addReleasable(fetchResults::decRef); + context.addReleasable(fetchResults); this.queryResults = resultConsumer.getAtomicArray(); this.aggregatedDfs = aggregatedDfs; this.nextPhaseFactory = nextPhaseFactory; @@ -225,10 +230,8 @@ private void moveToNextPhase( AtomicArray fetchResultsArr ) { var resp = SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr); - try { - context.executeNextPhase(this, nextPhaseFactory.apply(resp, queryResults)); - } finally { - resp.decRef(); - } + context.addReleasable(resp::decRef); + fetchResults.close(); + context.executeNextPhase(this, nextPhaseFactory.apply(resp, queryResults)); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java index 39813a883c428..874437311d086 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java @@ -54,7 +54,7 @@ public OpenPointInTimeRequest(StreamInput in) throws IOException { this.keepAlive = in.readTimeValue(); this.routing = in.readOptionalString(); this.preference = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { this.maxConcurrentShardRequests = in.readVInt(); } if (in.getTransportVersion().onOrAfter(TransportVersions.PIT_WITH_INDEX_FILTER)) { @@ -70,7 +70,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeTimeValue(keepAlive); out.writeOptionalString(routing); out.writeOptionalString(preference); - if 
(out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeVInt(maxConcurrentShardRequests); } if (out.getTransportVersion().onOrAfter(TransportVersions.PIT_WITH_INDEX_FILTER)) { diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index b7b113601560b..34ee0fc146aa5 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -105,11 +105,7 @@ public QueryPhaseResultConsumer( @Override protected void doClose() { - try { - super.doClose(); - } finally { - pendingMerges.close(); - } + pendingMerges.close(); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 6fcfc97c33c9e..fcc848384866a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -68,7 +68,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction clusters ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; - addReleasable(queryPhaseResultConsumer::decRef); + addReleasable(queryPhaseResultConsumer); this.progressListener = task.getProgressListener(); // don't build the SearchShard list (can be expensive) if the SearchProgressListener won't use it if (progressListener != SearchProgressListener.NOOP) { @@ -95,7 +95,6 @@ protected SearchPhase getNextPhase(final SearchPhaseResults res final List dfsSearchResults = results.getAtomicArray().asList(); final AggregatedDfs aggregatedDfs = SearchPhaseController.aggregateDfs(dfsSearchResults); final List mergedKnnResults = SearchPhaseController.mergeKnnResults(getRequest(), dfsSearchResults); - queryPhaseResultConsumer.incRef(); return new DfsQueryPhase( dfsSearchResults, aggregatedDfs, diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 5ffb9024d3ee1..6cfea93068a86 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -364,11 +364,15 @@ public static SearchResponseSections merge( } ScoreDoc[] sortedDocs = reducedQueryPhase.sortedTopDocs.scoreDocs; var fetchResults = fetchResultsArray.asList(); - SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResultsArray); - if (reducedQueryPhase.suggest != null && fetchResults.isEmpty() == false) { - mergeSuggest(reducedQueryPhase, fetchResultsArray, hits, sortedDocs); + final SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResultsArray); + try { + if (reducedQueryPhase.suggest != null && fetchResults.isEmpty() == false) { + mergeSuggest(reducedQueryPhase, fetchResultsArray, hits, sortedDocs); + } + return reducedQueryPhase.buildResponse(hits, fetchResults); + } finally { + hits.decRef(); } - return reducedQueryPhase.buildResponse(hits, fetchResults); } private static void mergeSuggest( @@ -462,6 +466,7 @@ private static SearchHits getHits( searchHit.score(shardDoc.score); } 
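The getHits/merge hunk here is the core of the hit ref-counting change: a container that publishes pooled hits takes its own reference to each one (`searchHit.incRef()` after `hits.add(searchHit)`), and the temporary merged container releases its reference in a finally block. A toy sketch of that ownership discipline, with `PooledHit` as an illustrative stand-in for a ref-counted SearchHit rather than the real class:

```java
// Minimal sketch of the acquire-before-share / release-in-finally discipline.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class RefCountDemo {
    static final class PooledHit {
        private final AtomicInteger refs = new AtomicInteger(1); // creator owns one ref
        final String id;
        PooledHit(String id) { this.id = id; }
        void incRef() { refs.incrementAndGet(); }
        void decRef() {
            if (refs.decrementAndGet() == 0) {
                System.out.println("released " + id); // return pooled buffers here
            }
        }
    }

    // Builds a new container from shard results: each hit gains one reference
    // owned by the returned list, mirroring `hits.add(searchHit); searchHit.incRef();`.
    static List<PooledHit> merge(List<PooledHit> shardHits) {
        List<PooledHit> merged = new ArrayList<>();
        for (PooledHit hit : shardHits) {
            merged.add(hit);
            hit.incRef();
        }
        return merged;
    }

    public static void main(String[] args) {
        List<PooledHit> shardHits = List.of(new PooledHit("a"), new PooledHit("b"));
        List<PooledHit> merged = merge(shardHits);
        try {
            // ... build the final response from `merged` ...
        } finally {
            // The temporary container gives its references back, as in the
            // `finally { hits.decRef(); }` added to SearchPhaseController.merge.
            merged.forEach(PooledHit::decRef);
            shardHits.forEach(PooledHit::decRef); // original owners release too
        }
    }
}
```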
hits.add(searchHit); + searchHit.incRef(); } } return new SearchHits( diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java index 11b8e0a0792a3..28606ecc09f90 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.search.SearchPhaseResult; import java.util.stream.Stream; @@ -17,7 +17,7 @@ /** * This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing */ -abstract class SearchPhaseResults implements RefCounted { +abstract class SearchPhaseResults implements Releasable { private final int numShards; SearchPhaseResults(int numShards) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java b/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java index 096f2606d3f02..f5d280a01257c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java @@ -104,6 +104,15 @@ protected void onFetchResult(int shardIndex) {} */ protected void onFetchFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {} + /** + * Indicates that a cluster has finished a search operation. Used for CCS minimize_roundtrips=true only. + * + * @param clusterAlias alias of cluster that has finished a search operation and returned a SearchResponse. + * The cluster alias for the local cluster is RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY. 
+ * @param searchResponse SearchResponse from cluster 'clusterAlias' + */ + protected void onClusterResponseMinimizeRoundtrips(String clusterAlias, SearchResponse searchResponse) {} + final void notifyListShards( List shards, List skippedShards, @@ -167,6 +176,14 @@ final void notifyFetchFailure(int shardIndex, SearchShardTarget shardTarget, Exc } } + final void notifyClusterResponseMinimizeRoundtrips(String clusterAlias, SearchResponse searchResponse) { + try { + onClusterResponseMinimizeRoundtrips(clusterAlias, searchResponse); + } catch (Exception e) { + logger.warn(() -> "[" + clusterAlias + "] Failed to execute progress listener onResponseMinimizeRoundtrips", e); + } + } + static List buildSearchShards(List results) { return results.stream() .filter(Objects::nonNull) diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 660fdb38b130b..84b9dc745ed92 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -20,7 +20,9 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestActions; @@ -30,6 +32,7 @@ import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; @@ -81,9 +84,16 @@ public class SearchResponse extends ActionResponse implements ChunkedToXContentO private final Clusters clusters; private final long tookInMillis; + private final RefCounted refCounted = LeakTracker.wrap(new AbstractRefCounted() { + @Override + protected void closeInternal() { + hits.decRef(); + } + }); + public SearchResponse(StreamInput in) throws IOException { super(in); - this.hits = new SearchHits(in); + this.hits = SearchHits.readFrom(in, true); this.aggregations = in.readBoolean() ? InternalAggregations.readFrom(in) : null; this.suggest = in.readBoolean() ? new Suggest(in) : null; this.timedOut = in.readBoolean(); @@ -191,6 +201,7 @@ public SearchResponse( String pointInTimeId ) { this.hits = hits; + hits.incRef(); this.aggregations = aggregations; this.suggest = suggest; this.profileResults = profileResults; @@ -210,6 +221,26 @@ public SearchResponse( : "SearchResponse can't have both scrollId [" + scrollId + "] and searchContextId [" + pointInTimeId + "]"; } + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + public RestStatus status() { return RestStatus.status(successfulShards, totalShards, shardFailures); } @@ -218,6 +249,7 @@ public RestStatus status() { * The search hits. 
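The new onClusterResponseMinimizeRoundtrips hook follows the listener convention used throughout SearchProgressListener: the framework calls a final notifyX method, which delegates to a protected, overridable onX and logs listener failures instead of propagating them into the search. A minimal sketch of that shape, with illustrative names and an int hit count standing in for the SearchResponse argument:

```java
// Sketch of the "notify" wrapper pattern: user overrides onX, framework calls
// notifyX, and listener bugs never break the search itself.
import java.util.ArrayList;
import java.util.List;

public class ProgressListenerDemo {
    static class ClusterProgressListener {
        // Override point: called once per cluster when its response arrives
        // (the minimize_roundtrips case, one merged response per cluster).
        protected void onClusterResponse(String clusterAlias, int totalHits) {}

        final void notifyClusterResponse(String clusterAlias, int totalHits) {
            try {
                onClusterResponse(clusterAlias, totalHits);
            } catch (Exception e) {
                // Logged, never rethrown, matching notifyClusterResponseMinimizeRoundtrips.
                System.err.println("[" + clusterAlias + "] listener failed: " + e);
            }
        }
    }

    public static void main(String[] args) {
        List<String> finished = new ArrayList<>();
        ClusterProgressListener listener = new ClusterProgressListener() {
            @Override
            protected void onClusterResponse(String clusterAlias, int totalHits) {
                finished.add(clusterAlias + "=" + totalHits); // e.g. surface partial results
            }
        };
        listener.notifyClusterResponse("(local)", 10);
        listener.notifyClusterResponse("remote1", 7);
        System.out.println(finished);
    }
}
```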
*/ public SearchHits getHits() { + assert hasReferences(); return hits; } @@ -344,6 +376,7 @@ public Clusters getClusters() { @Override public Iterator toXContentChunked(ToXContent.Params params) { + assert hasReferences(); return Iterators.concat( ChunkedToXContentHelper.startObject(), this.innerToXContentChunked(params), @@ -493,6 +526,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE } } } + return new SearchResponse( hits, aggs, @@ -514,6 +548,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE @Override public void writeTo(StreamOutput out) throws IOException { + assert hasReferences(); hits.writeTo(out); out.writeOptionalWriteable((InternalAggregations) aggregations); out.writeOptionalWriteable(suggest); @@ -537,7 +572,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return Strings.toString(this); + return hasReferences() == false ? "SearchResponse[released]" : Strings.toString(this); } /** @@ -632,7 +667,7 @@ public Clusters(StreamInput in) throws IOException { this.total = in.readVInt(); int successfulTemp = in.readVInt(); int skippedTemp = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { List clusterList = in.readCollectionAsList(Cluster::new); if (clusterList.isEmpty()) { this.clusterInfo = Collections.emptyMap(); @@ -685,7 +720,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(total); out.writeVInt(successful); out.writeVInt(skipped); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { if (clusterInfo != null) { List clusterList = clusterInfo.values().stream().toList(); out.writeCollection(clusterList); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index 1b616b9f3bc87..9db9d65bc3dac 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -65,7 +65,7 @@ // TODO it may make sense to integrate the remote clusters responses as a shard response in the initial search phase and ignore hits coming // from the remote clusters in the fetch phase. This would be identical to the removed QueryAndFetch strategy except that only the remote // cluster response would have the fetch results. -final class SearchResponseMerger implements Releasable { +public final class SearchResponseMerger implements Releasable { final int from; final int size; final int trackTotalHitsUpTo; @@ -98,7 +98,7 @@ final class SearchResponseMerger implements Releasable { * Merges currently happen at once when all responses are available and {@link #getMergedResponse(Clusters)} )} is called. * That may change in the future as it's possible to introduce incremental merges as responses come in if necessary. */ - void add(SearchResponse searchResponse) { + public void add(SearchResponse searchResponse) { assert searchResponse.getScrollId() == null : "merging scroll results is not supported"; searchResponse.mustIncRef(); searchResponses.add(searchResponse); @@ -109,10 +109,13 @@ int numResponses() { } /** - * Returns the merged response. 
To be called once all responses have been added through {@link #add(SearchResponse)} - * so that all responses are merged into a single one. + * Returns the merged response of all SearchResponses received so far. Can be called at any point, + * including when only some clusters have finished, in order to get "incremental" partial results. + * @param clusters The Clusters object for the search to report on the status of each cluster + * involved in the cross-cluster search + * @return merged response */ - SearchResponse getMergedResponse(Clusters clusters) { + public SearchResponse getMergedResponse(Clusters clusters) { // if the search is only across remote clusters, none of them are available, and all of them have skip_unavailable set to true, // we end up calling merge without anything to merge, we just return an empty search response if (searchResponses.size() == 0) { @@ -201,33 +204,37 @@ SearchResponse getMergedResponse(Clusters clusters) { setTopDocsShardIndex(shards, topDocsList); TopDocs topDocs = mergeTopDocs(topDocsList, size, from); SearchHits mergedSearchHits = topDocsToSearchHits(topDocs, topDocsStats); - setSuggestShardIndex(shards, groupedSuggestions); - Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions)); - InternalAggregations reducedAggs = aggs.isEmpty() - ? InternalAggregations.EMPTY - : InternalAggregations.topLevelReduce(aggs, aggReduceContextBuilder.forFinalReduction()); - ShardSearchFailure[] shardFailures = failures.toArray(ShardSearchFailure.EMPTY_ARRAY); - SearchProfileResults profileShardResults = profileResults.isEmpty() ? null : new SearchProfileResults(profileResults); - // make failures ordering consistent between ordinary search and CCS by looking at the shard they come from - Arrays.sort(shardFailures, FAILURES_COMPARATOR); - long tookInMillis = searchTimeProvider.buildTookInMillis(); - return new SearchResponse( - mergedSearchHits, - reducedAggs, - suggest, - topDocsStats.timedOut, - topDocsStats.terminatedEarly, - profileShardResults, - numReducePhases, - null, - totalShards, - successfulShards, - skippedShards, - tookInMillis, - shardFailures, - clusters, - null - ); + try { + setSuggestShardIndex(shards, groupedSuggestions); + Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions)); + InternalAggregations reducedAggs = aggs.isEmpty() + ? InternalAggregations.EMPTY + : InternalAggregations.topLevelReduce(aggs, aggReduceContextBuilder.forFinalReduction()); + ShardSearchFailure[] shardFailures = failures.toArray(ShardSearchFailure.EMPTY_ARRAY); + SearchProfileResults profileShardResults = profileResults.isEmpty() ? 
null : new SearchProfileResults(profileResults); + // make failures ordering consistent between ordinary search and CCS by looking at the shard they come from + Arrays.sort(shardFailures, FAILURES_COMPARATOR); + long tookInMillis = searchTimeProvider.buildTookInMillis(); + return new SearchResponse( + mergedSearchHits, + reducedAggs, + suggest, + topDocsStats.timedOut, + topDocsStats.terminatedEarly, + profileShardResults, + numReducePhases, + null, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardFailures, + clusters, + null + ); + } finally { + mergedSearchHits.decRef(); + } } private static final Comparator FAILURES_COMPARATOR = new Comparator() { @@ -373,6 +380,7 @@ private static SearchHits topDocsToSearchHits(TopDocs topDocs, TopDocsStats topD for (int i = 0; i < topDocs.scoreDocs.length; i++) { FieldDocAndSearchHit scoreDoc = (FieldDocAndSearchHit) topDocs.scoreDocs[i]; searchHits[i] = scoreDoc.searchHit; + scoreDoc.searchHit.mustIncRef(); } } SortField[] sortFields = null; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java index 805ef033db27a..d52a585b3e792 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java @@ -64,6 +64,7 @@ public SearchResponseSections( int numReducePhases ) { this.hits = hits; + hits.incRef(); this.aggregations = aggregations; this.suggest = suggest; this.profileResults = profileResults; @@ -73,7 +74,7 @@ public SearchResponseSections( refCounted = hits.getHits().length > 0 ? LeakTracker.wrap(new AbstractRefCounted() { @Override protected void closeInternal() { - // TODO: noop until hits are ref counted + hits.decRef(); } }) : ALWAYS_REFERENCED; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTask.java b/server/src/main/java/org/elasticsearch/action/search/SearchTask.java index b7e8de3b97b03..3bf72313c4c21 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTask.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTask.java @@ -21,6 +21,7 @@ public class SearchTask extends CancellableTask { // generating description in a lazy way since source can be quite big private final Supplier descriptionSupplier; private SearchProgressListener progressListener = SearchProgressListener.NOOP; + private Supplier searchResponseMergerSupplier; // used for CCS minimize_roundtrips=true public SearchTask( long id, @@ -53,4 +54,19 @@ public final SearchProgressListener getProgressListener() { return progressListener; } + /** + * @return the Supplier of {@link SearchResponseMerger} attached to this task. Will be null + * for local-only search and cross-cluster searches with minimize_roundtrips=false. + */ + public Supplier getSearchResponseMergerSupplier() { + return searchResponseMergerSupplier; + } + + /** + * @param supplier Attach a Supplier of {@link SearchResponseMerger} to this task. 
+ * For use with CCS minimize_roundtrips=true + */ + public void setSearchResponseMergerSupplier(Supplier supplier) { + this.searchResponseMergerSupplier = supplier; + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 727e576764102..e42ac1f4794ff 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -362,6 +362,7 @@ void executeRequest( .notifyListShards(Collections.emptyList(), Collections.emptyList(), clusters, false, timeProvider); } ccsRemoteReduce( + task, parentTaskId, rewritten, localIndices, @@ -496,6 +497,7 @@ public static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) { * Handles ccs_minimize_roundtrips=true */ static void ccsRemoteReduce( + SearchTask task, TaskId parentTaskId, SearchRequest searchRequest, OriginalIndices localIndices, @@ -532,7 +534,6 @@ static void ccsRemoteReduce( remoteClusterClient.search(ccsSearchRequest, new ActionListener<>() { @Override public void onResponse(SearchResponse searchResponse) { - // TODO: in CCS fail fast ticket we may need to fail the query if the cluster is marked as FAILED // overwrite the existing cluster entry with the updated one ccsClusterInfoUpdate(searchResponse, clusters, clusterAlias, skipUnavailable); Map profileResults = searchResponse.getProfileResults(); @@ -580,6 +581,9 @@ public void onFailure(Exception e) { timeProvider, aggReduceContextBuilder ); + task.setSearchResponseMergerSupplier( + () -> createSearchResponseMerger(searchRequest.source(), timeProvider, aggReduceContextBuilder) + ); final AtomicReference exceptions = new AtomicReference<>(); int totalClusters = remoteIndices.size() + (localIndices == null ? 
0 : 1); final CountDown countDown = new CountDown(totalClusters); @@ -602,6 +606,7 @@ public void onFailure(Exception e) { exceptions, searchResponseMerger, clusters, + task.getProgressListener(), listener ); Client remoteClusterClient = remoteClusterService.getRemoteClusterClient( @@ -619,6 +624,7 @@ public void onFailure(Exception e) { exceptions, searchResponseMerger, clusters, + task.getProgressListener(), listener ); SearchRequest ccsLocalSearchRequest = SearchRequest.subSearchRequest( @@ -710,7 +716,7 @@ Map createFinalResponse() { final String[] indices = entry.getValue().indices(); final Executor responseExecutor = transportService.getThreadPool().executor(ThreadPool.Names.SEARCH_COORDINATION); // TODO: support point-in-time - if (searchContext == null && connection.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (searchContext == null && connection.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { SearchShardsRequest searchShardsRequest = new SearchShardsRequest( indices, indicesOptions, @@ -759,6 +765,7 @@ private static ActionListener createCCSListener( AtomicReference exceptions, SearchResponseMerger searchResponseMerger, SearchResponse.Clusters clusters, + SearchProgressListener progressListener, ActionListener originalListener ) { return new CCSActionListener<>( @@ -771,9 +778,9 @@ private static ActionListener createCCSListener( ) { @Override void innerOnResponse(SearchResponse searchResponse) { - // TODO: in CCS fail fast ticket we may need to fail the query if the cluster gets marked as FAILED ccsClusterInfoUpdate(searchResponse, clusters, clusterAlias, skipUnavailable); searchResponseMerger.add(searchResponse); + progressListener.notifyClusterResponseMinimizeRoundtrips(clusterAlias, searchResponse); } @Override @@ -1494,7 +1501,6 @@ public final void onFailure(Exception e) { if (cluster != null) { ccsClusterInfoUpdate(f, clusters, clusterAlias, true); } - // skippedClusters.incrementAndGet(); } else { if (cluster != null) { ccsClusterInfoUpdate(f, clusters, clusterAlias, false); diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java index 52b4c00175fa8..b69b87190f2a7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java @@ -42,7 +42,7 @@ public class BaseBroadcastResponse extends ActionResponse { private final DefaultShardOperationFailedException[] shardFailures; @SuppressWarnings("unchecked") - protected static void declareBroadcastFields(ConstructingObjectParser PARSER) { + public static void declareBroadcastFields(ConstructingObjectParser PARSER) { ConstructingObjectParser shardsParser = new ConstructingObjectParser<>( "_shards", true, diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java index bf8376cfc5481..312a9843c9e2b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java @@ -46,7 +46,7 @@ public BroadcastUnpromotableRequest(StreamInput in) throws IOException { indexShardRoutingTable 
= null; shardId = new ShardId(in); indices = new String[] { shardId.getIndex().getName() }; - failShardOnError = in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) && in.readBoolean(); + failShardOnError = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) && in.readBoolean(); } public BroadcastUnpromotableRequest(IndexShardRoutingTable indexShardRoutingTable) { diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 600790b2fd841..800eca618c5bc 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -183,6 +183,8 @@ public ActionRequestValidationException validate() { validationException = DocWriteRequest.validateSeqNoBasedCASParams(this, validationException); + validationException = DocWriteRequest.validateDocIdLength(id, validationException); + if (ifSeqNo != UNASSIGNED_SEQ_NO) { if (retryOnConflict > 0) { validationException = addValidationError("compare and write operations can not be retried", validationException); diff --git a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java index a8365a62c9e58..8e9977696bc18 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequestBuilder; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse; @@ -293,7 +292,7 @@ public interface ClusterAdminClient extends ElasticsearchClient { * @param request The nodes tasks request * @return The result future */ - ActionFuture cancelTasks(CancelTasksRequest request); + ActionFuture cancelTasks(CancelTasksRequest request); /** * Cancel active tasks @@ -301,7 +300,7 @@ public interface ClusterAdminClient extends ElasticsearchClient { * @param request The nodes tasks request * @param listener A listener to be notified with a result */ - void cancelTasks(CancelTasksRequest request, ActionListener listener); + void cancelTasks(CancelTasksRequest request, ActionListener listener); /** * Cancel active tasks diff --git a/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java index 9ba26b95244ab..d931302740f19 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder; -import 
org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; @@ -30,10 +29,8 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; @@ -56,7 +53,6 @@ import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.resolve.ResolveIndexAction; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequestBuilder; @@ -71,7 +67,6 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequestBuilder; -import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -85,6 +80,7 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock; import org.elasticsearch.core.Nullable; @@ -261,7 +257,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The refresh request * @return The result future */ - ActionFuture refresh(RefreshRequest request); + ActionFuture refresh(RefreshRequest request); /** * Explicitly refresh one or more indices (making the content indexed since the last refresh searchable). @@ -269,7 +265,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The refresh request * @param listener A listener to be notified with a result */ - void refresh(RefreshRequest request, ActionListener listener); + void refresh(RefreshRequest request, ActionListener listener); /** * Explicitly refresh one or more indices (making the content indexed since the last refresh searchable). 
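With the dedicated RefreshResponse, FlushResponse, ForceMergeResponse and ClearIndicesCacheResponse types gone, callers of these admin methods receive the shared broadcast shape and can handle per-shard accounting uniformly. A sketch of caller code under that assumption; it uses the internal Client API, so it compiles against the server module rather than running standalone:

```java
// Hypothetical caller after this change: refresh (and likewise flush,
// forceMerge, clearCache) resolves to the shared BroadcastResponse.
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.client.internal.Client;

final class RefreshCaller {
    static void refresh(Client client, String index) {
        client.admin().indices().refresh(new RefreshRequest(index), ActionListener.wrap(
            // Shard counts come from BaseBroadcastResponse, shared by every
            // broadcast action instead of per-action response classes.
            (BroadcastResponse r) -> System.out.printf(
                "refreshed %s: %d/%d shards ok, %d failed%n",
                index, r.getSuccessfulShards(), r.getTotalShards(), r.getFailedShards()
            ),
            e -> System.err.println("refresh failed: " + e)
        ));
    }
}
```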
@@ -282,7 +278,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The flush request * @return A result future */ - ActionFuture flush(FlushRequest request); + ActionFuture flush(FlushRequest request); /** * Explicitly flush one or more indices (releasing memory from the node). @@ -290,7 +286,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The flush request * @param listener A listener to be notified with a result */ - void flush(FlushRequest request, ActionListener listener); + void flush(FlushRequest request, ActionListener listener); /** * Explicitly flush one or more indices (releasing memory from the node). @@ -303,7 +299,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The optimize request * @return A result future */ - ActionFuture forceMerge(ForceMergeRequest request); + ActionFuture forceMerge(ForceMergeRequest request); /** * Explicitly force merge one or more indices into the specified number of segments. @@ -311,7 +307,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The force merge request * @param listener A listener to be notified with a result */ - void forceMerge(ForceMergeRequest request, ActionListener listener); + void forceMerge(ForceMergeRequest request, ActionListener listener); /** * Explicitly force merge one or more indices into the specified number of segments. @@ -436,7 +432,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The clear indices cache request * @return The result future */ - ActionFuture clearCache(ClearIndicesCacheRequest request); + ActionFuture clearCache(ClearIndicesCacheRequest request); /** * Clear indices cache. @@ -444,7 +440,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The clear indices cache request * @param listener A listener to be notified with a result */ - void clearCache(ClearIndicesCacheRequest request, ActionListener listener); + void clearCache(ClearIndicesCacheRequest request, ActionListener listener); /** * Clear indices cache. @@ -591,7 +587,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { /** * Shrinks an index using an explicit request that allows the settings, mappings and aliases of the target index to be specified.
*/ - void resizeIndex(ResizeRequest request, ActionListener listener); + void resizeIndex(ResizeRequest request, ActionListener listener); /** * Swaps the index pointed to by an alias given all provided conditions are satisfied diff --git a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java index 12f3dec804809..c6d9c3a8f3563 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java @@ -35,7 +35,6 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequestBuilder; @@ -129,7 +128,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheAction; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; @@ -144,11 +142,9 @@ import org.elasticsearch.action.admin.indices.flush.FlushAction; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexAction; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder; @@ -179,7 +175,6 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.resolve.ResolveIndexAction; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; @@ -199,7 +194,6 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeAction; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequestBuilder; -import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import 
org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; @@ -275,6 +269,7 @@ import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsAction; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; @@ -806,12 +801,12 @@ public GetTaskRequestBuilder prepareGetTask(TaskId taskId) { } @Override - public ActionFuture cancelTasks(CancelTasksRequest request) { + public ActionFuture cancelTasks(CancelTasksRequest request) { return execute(CancelTasksAction.INSTANCE, request); } @Override - public void cancelTasks(CancelTasksRequest request, ActionListener listener) { + public void cancelTasks(CancelTasksRequest request, ActionListener listener) { execute(CancelTasksAction.INSTANCE, request, listener); } @@ -1118,7 +1113,7 @@ public GetAliasesRequestBuilder prepareGetAliases(String... aliases) { } @Override - public ActionFuture clearCache(final ClearIndicesCacheRequest request) { + public ActionFuture clearCache(final ClearIndicesCacheRequest request) { return execute(ClearIndicesCacheAction.INSTANCE, request); } @@ -1138,7 +1133,7 @@ public GetIndexRequestBuilder prepareGetIndex() { } @Override - public void clearCache(final ClearIndicesCacheRequest request, final ActionListener listener) { + public void clearCache(final ClearIndicesCacheRequest request, final ActionListener listener) { execute(ClearIndicesCacheAction.INSTANCE, request, listener); } @@ -1218,12 +1213,12 @@ public OpenIndexRequestBuilder prepareOpen(String... indices) { } @Override - public ActionFuture flush(final FlushRequest request) { + public ActionFuture flush(final FlushRequest request) { return execute(FlushAction.INSTANCE, request); } @Override - public void flush(final FlushRequest request, final ActionListener listener) { + public void flush(final FlushRequest request, final ActionListener listener) { execute(FlushAction.INSTANCE, request, listener); } @@ -1278,12 +1273,12 @@ public PutMappingRequestBuilder preparePutMapping(String... indices) { } @Override - public ActionFuture forceMerge(final ForceMergeRequest request) { + public ActionFuture forceMerge(final ForceMergeRequest request) { return execute(ForceMergeAction.INSTANCE, request); } @Override - public void forceMerge(final ForceMergeRequest request, final ActionListener listener) { + public void forceMerge(final ForceMergeRequest request, final ActionListener listener) { execute(ForceMergeAction.INSTANCE, request, listener); } @@ -1293,12 +1288,12 @@ public ForceMergeRequestBuilder prepareForceMerge(String... 
indices) { } @Override - public ActionFuture refresh(final RefreshRequest request) { + public ActionFuture refresh(final RefreshRequest request) { return execute(RefreshAction.INSTANCE, request); } @Override - public void refresh(final RefreshRequest request, final ActionListener listener) { + public void refresh(final RefreshRequest request, final ActionListener listener) { execute(RefreshAction.INSTANCE, request, listener); } @@ -1453,7 +1448,7 @@ public ResizeRequestBuilder prepareResizeIndex(String sourceIndex, String target } @Override - public void resizeIndex(ResizeRequest request, ActionListener listener) { + public void resizeIndex(ResizeRequest request, ActionListener listener) { execute(ResizeAction.INSTANCE, request, listener); } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java index c2b61e496e9c9..0f83e6f2d8e19 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java @@ -144,7 +144,7 @@ public Iterator toXContentChunked(ToXContent.Params params return Iterators.concat(startObject("nodes"), Iterators.map(leastAvailableSpaceUsage.entrySet().iterator(), c -> (builder, p) -> { builder.startObject(c.getKey()); { // node - builder.field("node_name", c.getValue().getNodeName()); + builder.field("node_name", c.getValue().nodeName()); builder.startObject("least_available"); { c.getValue().toShortXContent(builder); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java b/server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java index 7dbd4f864bdb3..593bb251d3f5e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java @@ -122,14 +122,14 @@ private void modifyDiskUsage(String nodeId, long freeDelta) { if (diskUsage == null) { return; } - var path = diskUsage.getPath(); + var path = diskUsage.path(); updateDiskUsage(leastAvailableSpaceUsage, nodeId, path, freeDelta); updateDiskUsage(mostAvailableSpaceUsage, nodeId, path, freeDelta); } private void updateDiskUsage(Map availableSpaceUsage, String nodeId, String path, long freeDelta) { var usage = availableSpaceUsage.get(nodeId); - if (usage != null && Objects.equals(usage.getPath(), path)) { + if (usage != null && Objects.equals(usage.path(), path)) { // ensure new value is within bounds availableSpaceUsage.put(nodeId, updateWithFreeBytes(usage, freeDelta)); } @@ -139,7 +139,7 @@ private static DiskUsage updateWithFreeBytes(DiskUsage usage, long delta) { // free bytes might go out of range in case when multiple data path are used // we might not know exact disk used to allocate a shard and conservatively update // most used disk on a target node and least used disk on a source node - var freeBytes = withinRange(0, usage.getTotalBytes(), usage.freeBytes() + delta); + var freeBytes = withinRange(0, usage.totalBytes(), usage.freeBytes() + delta); return usage.copyWithFreeBytes(freeBytes); } diff --git a/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java b/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java index 1d606737edf3a..3bc6f889e5778 100644 --- a/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java +++ b/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java @@ -53,33 +53,21 @@ private static double truncatePercent(double pct) { XContentBuilder 
toShortXContent(XContentBuilder builder) throws IOException { builder.field("path", this.path); builder.humanReadableField("total_bytes", "total", ByteSizeValue.ofBytes(this.totalBytes)); - builder.humanReadableField("used_bytes", "used", ByteSizeValue.ofBytes(this.getUsedBytes())); + builder.humanReadableField("used_bytes", "used", ByteSizeValue.ofBytes(this.usedBytes())); builder.humanReadableField("free_bytes", "free", ByteSizeValue.ofBytes(this.freeBytes)); - builder.field("free_disk_percent", truncatePercent(this.getFreeDiskAsPercentage())); - builder.field("used_disk_percent", truncatePercent(this.getUsedDiskAsPercentage())); + builder.field("free_disk_percent", truncatePercent(this.freeDiskAsPercentage())); + builder.field("used_disk_percent", truncatePercent(this.usedDiskAsPercentage())); return builder; } public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("node_id", this.nodeId); builder.field("node_name", this.nodeName); - builder = toShortXContent(builder); + toShortXContent(builder); return builder; } - public String getNodeId() { - return nodeId; - } - - public String getNodeName() { - return nodeName; - } - - public String getPath() { - return path; - } - - public double getFreeDiskAsPercentage() { + public double freeDiskAsPercentage() { // We return 100.0% in order to fail "open", in that if we have invalid // numbers for the total bytes, it's as if we don't know disk usage. if (totalBytes == 0) { @@ -88,20 +76,12 @@ public double getFreeDiskAsPercentage() { return 100.0 * freeBytes / totalBytes; } - public double getUsedDiskAsPercentage() { - return 100.0 - getFreeDiskAsPercentage(); - } - - public long getFreeBytes() { - return freeBytes; - } - - public long getTotalBytes() { - return totalBytes; + public double usedDiskAsPercentage() { + return 100.0 - freeDiskAsPercentage(); } - public long getUsedBytes() { - return getTotalBytes() - getFreeBytes(); + public long usedBytes() { + return totalBytes - freeBytes; } @Override @@ -113,9 +93,9 @@ public String toString() { + "][" + path + "] free: " - + ByteSizeValue.ofBytes(getFreeBytes()) + + ByteSizeValue.ofBytes(this.freeBytes()) + "[" - + Strings.format1Decimals(getFreeDiskAsPercentage(), "%") + + Strings.format1Decimals(freeDiskAsPercentage(), "%") + "]"; } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java index 5134f153a7fbb..c2cd403836593 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; import org.elasticsearch.cluster.coordination.CoordinationState.VoteCollection; @@ -140,6 +141,7 @@ public record ClusterFormationState( VotingConfiguration lastCommittedConfiguration, List resolvedAddresses, List foundPeers, + Set mastersOfPeers, long currentTerm, boolean hasDiscoveredQuorum, StatusInfo statusInfo, @@ -151,6 +153,7 @@ public ClusterFormationState( ClusterState clusterState, List resolvedAddresses, List foundPeers, + Set mastersOfPeers, long currentTerm, ElectionStrategy 
electionStrategy, StatusInfo statusInfo, @@ -166,6 +169,7 @@ public ClusterFormationState( clusterState.getLastCommittedConfiguration(), resolvedAddresses, foundPeers, + mastersOfPeers, currentTerm, calculateHasDiscoveredQuorum( foundPeers, @@ -216,6 +220,9 @@ public ClusterFormationState(StreamInput in) throws IOException { new VotingConfiguration(in), in.readCollectionAsImmutableList(TransportAddress::new), in.readCollectionAsImmutableList(DiscoveryNode::new), + in.getTransportVersion().onOrAfter(TransportVersions.PEERFINDER_REPORTS_PEERS_MASTERS) + ? in.readCollectionAsImmutableSet(DiscoveryNode::new) + : Set.of(), in.readLong(), in.readBoolean(), new StatusInfo(in), @@ -250,12 +257,19 @@ private String getCoordinatorDescription() { acceptedTerm ); - final StringBuilder foundPeersDescription = new StringBuilder(); + final StringBuilder foundPeersDescription = new StringBuilder("["); DiscoveryNodes.addCommaSeparatedNodesWithoutAttributes(foundPeers.iterator(), foundPeersDescription); + if (mastersOfPeers.isEmpty()) { + foundPeersDescription.append(']'); + } else { + foundPeersDescription.append("] who claim current master to be ["); + DiscoveryNodes.addCommaSeparatedNodesWithoutAttributes(mastersOfPeers.iterator(), foundPeersDescription); + foundPeersDescription.append(']'); + } final String discoveryStateIgnoringQuorum = String.format( Locale.ROOT, - "have discovered [%s]; %s", + "have discovered %s; %s", foundPeersDescription, discoveryWillContinueDescription ); @@ -291,7 +305,7 @@ private String getCoordinatorDescription() { if (lastCommittedConfiguration.equals(VotingConfiguration.MUST_JOIN_ELECTED_MASTER)) { return String.format( Locale.ROOT, - "master not discovered yet and this node was detached from its previous cluster, have discovered [%s]; %s", + "master not discovered yet and this node was detached from its previous cluster, have discovered %s; %s", foundPeersDescription, discoveryWillContinueDescription ); @@ -310,7 +324,7 @@ private String getCoordinatorDescription() { return String.format( Locale.ROOT, - "master not discovered or elected yet, an election requires %s, %s [%s]; %s", + "master not discovered or elected yet, an election requires %s, %s %s; %s", quorumDescription, haveDiscoveredQuorum, foundPeersDescription, @@ -388,6 +402,9 @@ public void writeTo(StreamOutput out) throws IOException { lastCommittedConfiguration.writeTo(out); out.writeCollection(resolvedAddresses); out.writeCollection(foundPeers); + if (out.getTransportVersion().onOrAfter(TransportVersions.PEERFINDER_REPORTS_PEERS_MASTERS)) { + out.writeCollection(mastersOfPeers); + } out.writeLong(currentTerm); out.writeBoolean(hasDiscoveredQuorum); statusInfo.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 3da890b37ade8..927ca1152a658 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -334,6 +334,7 @@ public ClusterFormationState getClusterFormationState() { getLastAcceptedState(), // doesn't care about blocks or the current master node so no need for getStateForMasterService peerFinder.getLastResolvedAddresses(), Stream.concat(Stream.of(getLocalNode()), StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false)).toList(), + peerFinder.getMastersOfPeers(), getCurrentTerm(), electionStrategy, nodeHealthService.getHealth(), diff --git 
a/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java index 56289ab348a3a..80b4b455912e7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java @@ -10,6 +10,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.core.Nullable; import org.elasticsearch.health.Diagnosis; import org.elasticsearch.health.HealthIndicatorDetails; @@ -39,14 +40,36 @@ public class StableMasterHealthIndicatorService implements HealthIndicatorService { public static final String NAME = "master_is_stable"; - public static final String GET_HELP_GUIDE = "https://ela.st/getting-help"; + + public static final Diagnosis TROUBLESHOOT_DISCOVERY = new Diagnosis( + new Diagnosis.Definition( + NAME, + "troubleshoot_discovery", + "The Elasticsearch cluster does not have a stable master node.", + "See discovery troubleshooting guidance at " + ReferenceDocs.DISCOVERY_TROUBLESHOOTING, + ReferenceDocs.DISCOVERY_TROUBLESHOOTING.toString() + ), + null + ); + + public static final Diagnosis TROUBLESHOOT_UNSTABLE_CLUSTER = new Diagnosis( + new Diagnosis.Definition( + NAME, + "troubleshoot_unstable_cluster", + "The Elasticsearch cluster does not have a stable master node.", + "See unstable cluster troubleshooting guidance at " + ReferenceDocs.UNSTABLE_CLUSTER_TROUBLESHOOTING, + ReferenceDocs.UNSTABLE_CLUSTER_TROUBLESHOOTING.toString() + ), + null + ); + public static final Diagnosis CONTACT_SUPPORT = new Diagnosis( new Diagnosis.Definition( NAME, "contact_support", "The Elasticsearch cluster does not have a stable master node.", - "Get help at " + GET_HELP_GUIDE, - GET_HELP_GUIDE + "Get help at " + ReferenceDocs.CONTACT_SUPPORT, + ReferenceDocs.CONTACT_SUPPORT.toString() ), null ); @@ -67,12 +90,13 @@ public class StableMasterHealthIndicatorService implements HealthIndicatorServic public static final String BACKUP_DISABLED_IMPACT_ID = "backup_disabled"; // Impacts of having an unstable master: - private static final String UNSTABLE_MASTER_INGEST_IMPACT = "The cluster cannot create, delete, or rebalance indices, and cannot " - + "insert or update documents."; - private static final String UNSTABLE_MASTER_DEPLOYMENT_MANAGEMENT_IMPACT = "Scheduled tasks such as Watcher, Index Lifecycle " - + "Management, and Snapshot Lifecycle Management will not work. The _cat APIs will not work."; - private static final String UNSTABLE_MASTER_BACKUP_IMPACT = "Snapshot and restore will not work, your data will not be backed up. " - + "Searchable snapshots cannot be mounted."; + private static final String UNSTABLE_MASTER_INGEST_IMPACT = """ + The cluster cannot create, delete, or rebalance indices, and cannot insert or update documents."""; + private static final String UNSTABLE_MASTER_DEPLOYMENT_MANAGEMENT_IMPACT = """ + Scheduled tasks such as Watcher, Index Lifecycle Management, and Snapshot Lifecycle Management will not work. \ + The _cat APIs will not work."""; + private static final String UNSTABLE_MASTER_BACKUP_IMPACT = """ + Snapshot and restore will not work. 
Your data will not be backed up, and searchable snapshots cannot be mounted."""; /** * This is the list of the impacts to be reported when the master node is determined to be unstable. @@ -128,7 +152,7 @@ HealthIndicatorResult getHealthIndicatorResult( HealthStatus status = HealthStatus.fromCoordinationDiagnosticsStatus(coordinationDiagnosticsResult.status()); HealthIndicatorDetails details = getDetails(coordinationDiagnosticsResult.details(), explain); Collection impacts = status.indicatesHealthProblem() ? UNSTABLE_MASTER_IMPACTS : List.of(); - List diagnosis = status.indicatesHealthProblem() ? getContactSupportUserActions(explain) : List.of(); + List diagnosis = status.indicatesHealthProblem() ? getUnstableMasterDiagnoses(explain) : List.of(); return createIndicator(status, coordinationDiagnosticsResult.summary(), details, impacts, diagnosis); } @@ -215,13 +239,16 @@ private String getNameForNodeId(String nodeId) { } /** - * This method returns the only user action that is relevant when the master is unstable -- contact support. - * @param explain If true, the returned list includes a UserAction to contact support, otherwise an empty list - * @return a single UserAction instructing users to contact support. + * This method returns the relevant user actions when the master is unstable, linking to some troubleshooting docs and suggesting to + * contact support. + * + * @param explain If true, the returned list includes UserActions linking to troubleshooting docs and another to contact support, + * otherwise an empty list. + * @return the relevant user actions when the master is unstable. */ - private static List getContactSupportUserActions(boolean explain) { + private List getUnstableMasterDiagnoses(boolean explain) { if (explain) { - return List.of(CONTACT_SUPPORT); + return List.of(TROUBLESHOOT_DISCOVERY, TROUBLESHOOT_UNSTABLE_CLUSTER, CONTACT_SUPPORT); } else { return List.of(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 84db5887b5926..ff31c6fe950d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -907,7 +907,7 @@ public DataStream(StreamInput in) throws IOException { in.readBoolean(), in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? in.readBoolean() : false, in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null, - in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null, + in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null, in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? in.readBoolean() : false, in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? readIndices(in) : List.of(), in.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED) ? 
in.readBoolean() : false @@ -944,7 +944,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { out.writeOptionalEnum(indexMode); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(lifecycle); } if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index 83a5d99c8f348..215ed515748ab 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -48,7 +48,7 @@ public class DataStreamLifecycle implements SimpleDiffable, ToXContentObject { // Versions over the wire - public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_500_061; + public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_10_X; public static final String DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME = "data_streams.lifecycle_only.mode"; @@ -187,7 +187,7 @@ public int hashCode() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(dataRetention); } if (out.getTransportVersion().onOrAfter(ADDED_ENABLED_FLAG_VERSION)) { @@ -197,7 +197,7 @@ public void writeTo(StreamOutput out) throws IOException { } public DataStreamLifecycle(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { dataRetention = in.readOptionalWriteable(Retention::read); } else { dataRetention = null; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java index aaf256a49a0a5..3453b3b6d70ff 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java @@ -35,8 +35,8 @@ public class SingleNodeShutdownMetadata implements SimpleDiffable, ToXContentObject { public static final TransportVersion REPLACE_SHUTDOWN_TYPE_ADDED_VERSION = TransportVersions.V_7_16_0; - public static final TransportVersion SIGTERM_ADDED_VERSION = TransportVersions.V_8_500_020; - public static final TransportVersion GRACE_PERIOD_ADDED_VERSION = TransportVersions.V_8_500_020; + public static final TransportVersion SIGTERM_ADDED_VERSION = TransportVersions.V_8_9_X; + public static final TransportVersion GRACE_PERIOD_ADDED_VERSION = TransportVersions.V_8_9_X; public static final ParseField NODE_ID_FIELD = new ParseField("node_id"); public static final ParseField TYPE_FIELD = new ParseField("type"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java index d36b70b49c6ab..18a99f984707f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java @@ -123,7 +123,7 @@ public Template(StreamInput in) throws IOException { } if (in.getTransportVersion().onOrAfter(DataStreamLifecycle.ADDED_ENABLED_FLAG_VERSION)) { this.lifecycle = in.readOptionalWriteable(DataStreamLifecycle::new); - } else if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + } else if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { boolean isExplicitNull = in.readBoolean(); if (isExplicitNull) { this.lifecycle = DataStreamLifecycle.newBuilder().enabled(false).build(); @@ -177,7 +177,7 @@ public void writeTo(StreamOutput out) throws IOException { } if (out.getTransportVersion().onOrAfter(DataStreamLifecycle.ADDED_ENABLED_FLAG_VERSION)) { out.writeOptionalWriteable(lifecycle); - } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { boolean isExplicitNull = lifecycle != null && lifecycle.isEnabled() == false; out.writeBoolean(isExplicitNull); if (isExplicitNull == false) { diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index e77a7b27e1a2c..01b67068db31f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -337,7 +337,7 @@ public DiscoveryNode(StreamInput in) throws IOException { } } this.roles = Collections.unmodifiableSortedSet(roles); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { versionInfo = new VersionInformation(Version.readVersion(in), IndexVersion.readVersion(in), IndexVersion.readVersion(in)); } else { versionInfo = inferVersionInformation(Version.readVersion(in)); @@ -374,7 +374,7 @@ public void writeTo(StreamOutput out) throws IOException { o.writeString(role.roleNameAbbreviation()); o.writeBoolean(role.canContainData()); }); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { Version.writeVersion(versionInfo.nodeVersion(), out); IndexVersion.writeVersion(versionInfo.minIndexVersion(), out); IndexVersion.writeVersion(versionInfo.maxIndexVersion(), out); diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index cd2c927d87f69..918056fea9ec6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -667,7 +667,7 @@ public String shortSummary() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(masterNodeId); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeVLong(nodeLeftGeneration); } // else nodeLeftGeneration is zero, or we're sending this to a remote cluster which does not care about the nodeLeftGeneration out.writeCollection(nodes.values()); @@ -682,7 +682,7 @@ public static DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) t builder.localNodeId(localNode.getId()); } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if 
(in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { builder.nodeLeftGeneration(in.readVLong()); } // else nodeLeftGeneration is zero, or we're receiving this from a remote cluster so the nodeLeftGeneration does not matter to us diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index bb0ca372e6a4c..76efc62e3ca06 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -186,10 +186,10 @@ public void onNewInfo(ClusterInfo info) { final String node = entry.getKey(); final DiskUsage usage = entry.getValue(); final RoutingNode routingNode = routingNodes.node(node); - final ByteSizeValue total = ByteSizeValue.ofBytes(usage.getTotalBytes()); + final ByteSizeValue total = ByteSizeValue.ofBytes(usage.totalBytes()); if (isDedicatedFrozenNode(routingNode)) { - if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdFrozenFloodStage(total).getBytes()) { + if (usage.freeBytes() < diskThresholdSettings.getFreeBytesThresholdFrozenFloodStage(total).getBytes()) { logger.warn( "flood stage disk watermark [{}] exceeded on {}", diskThresholdSettings.describeFrozenFloodStageThreshold(total, false), @@ -201,7 +201,7 @@ public void onNewInfo(ClusterInfo info) { continue; } - if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdFloodStage(total).getBytes()) { + if (usage.freeBytes() < diskThresholdSettings.getFreeBytesThresholdFloodStage(total).getBytes()) { nodesOverLowThreshold.add(node); nodesOverHighThreshold.add(node); nodesOverHighThresholdAndRelocating.remove(node); @@ -223,7 +223,7 @@ public void onNewInfo(ClusterInfo info) { continue; } - if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHighStage(total).getBytes()) { + if (usage.freeBytes() < diskThresholdSettings.getFreeBytesThresholdHighStage(total).getBytes()) { if (routingNode != null) { // might be temporarily null if the ClusterInfoService and the ClusterService are out of step for (ShardRouting routing : routingNode) { String indexName = routing.index().getName(); @@ -232,16 +232,16 @@ public void onNewInfo(ClusterInfo info) { } } - final long reservedSpace = info.getReservedSpace(usage.getNodeId(), usage.getPath()).total(); + final long reservedSpace = info.getReservedSpace(usage.nodeId(), usage.path()).total(); final DiskUsage usageWithReservedSpace = new DiskUsage( - usage.getNodeId(), - usage.getNodeName(), - usage.getPath(), - usage.getTotalBytes(), - Math.max(0L, usage.getFreeBytes() - reservedSpace) + usage.nodeId(), + usage.nodeName(), + usage.path(), + usage.totalBytes(), + Math.max(0L, usage.freeBytes() - reservedSpace) ); - if (usageWithReservedSpace.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHighStage(total).getBytes()) { + if (usageWithReservedSpace.freeBytes() < diskThresholdSettings.getFreeBytesThresholdHighStage(total).getBytes()) { nodesOverLowThreshold.add(node); nodesOverHighThreshold.add(node); @@ -258,7 +258,7 @@ public void onNewInfo(ClusterInfo info) { ); } - } else if (usageWithReservedSpace.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdLowStage(total).getBytes()) { + } else if (usageWithReservedSpace.freeBytes() < diskThresholdSettings.getFreeBytesThresholdLowStage(total).getBytes()) { nodesOverHighThresholdAndRelocating.remove(node); 
final boolean wasUnderLowThreshold = nodesOverLowThreshold.add(node); @@ -321,33 +321,33 @@ public void onNewInfo(ClusterInfo info) { ActionListener.releaseAfter(ActionListener.runAfter(ActionListener.wrap(ignored -> { final var reroutedClusterState = clusterStateSupplier.get(); for (DiskUsage diskUsage : usagesOverHighThreshold) { - final RoutingNode routingNode = reroutedClusterState.getRoutingNodes().node(diskUsage.getNodeId()); + final RoutingNode routingNode = reroutedClusterState.getRoutingNodes().node(diskUsage.nodeId()); final DiskUsage usageIncludingRelocations; final long relocatingShardsSize; if (routingNode != null) { // might be temporarily null if ClusterInfoService and ClusterService are out of step relocatingShardsSize = sizeOfRelocatingShards(routingNode, diskUsage, info, reroutedClusterState); usageIncludingRelocations = new DiskUsage( - diskUsage.getNodeId(), - diskUsage.getNodeName(), - diskUsage.getPath(), - diskUsage.getTotalBytes(), - diskUsage.getFreeBytes() - relocatingShardsSize + diskUsage.nodeId(), + diskUsage.nodeName(), + diskUsage.path(), + diskUsage.totalBytes(), + diskUsage.freeBytes() - relocatingShardsSize ); } else { usageIncludingRelocations = diskUsage; relocatingShardsSize = 0L; } - final ByteSizeValue total = ByteSizeValue.ofBytes(usageIncludingRelocations.getTotalBytes()); + final ByteSizeValue total = ByteSizeValue.ofBytes(usageIncludingRelocations.totalBytes()); - if (usageIncludingRelocations.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHighStage(total) + if (usageIncludingRelocations.freeBytes() < diskThresholdSettings.getFreeBytesThresholdHighStage(total) .getBytes()) { - nodesOverHighThresholdAndRelocating.remove(diskUsage.getNodeId()); + nodesOverHighThresholdAndRelocating.remove(diskUsage.nodeId()); logger.warn(""" high disk watermark [{}] exceeded on {}, shards will be relocated away from this node; currently \ relocating away shards totalling [{}] bytes; the node is expected to continue to exceed the high disk \ watermark when these relocations are complete\ """, diskThresholdSettings.describeHighThreshold(total, false), diskUsage, -relocatingShardsSize); - } else if (nodesOverHighThresholdAndRelocating.add(diskUsage.getNodeId())) { + } else if (nodesOverHighThresholdAndRelocating.add(diskUsage.nodeId())) { logger.info(""" high disk watermark [{}] exceeded on {}, shards will be relocated away from this node; currently \ relocating away shards totalling [{}] bytes; the node is expected to be below the high disk watermark \ @@ -424,7 +424,7 @@ long sizeOfRelocatingShards(RoutingNode routingNode, DiskUsage diskUsage, Cluste return DiskThresholdDecider.sizeOfUnaccountedShards( routingNode, true, - diskUsage.getPath(), + diskUsage.path(), info, SnapshotShardSizeInfo.EMPTY, reroutedClusterState.metadata(), diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index 4e674648bc3a4..3c0125272b094 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -160,7 +160,7 @@ private static Map unaccountedSearchableSnapshotSizes(ClusterState if (clusterInfo != null) { for (RoutingNode node : clusterState.getRoutingNodes()) { DiskUsage usage = clusterInfo.getNodeMostAvailableDiskUsages().get(node.nodeId()); - ClusterInfo.ReservedSpace reservedSpace = 
clusterInfo.getReservedSpace(node.nodeId(), usage != null ? usage.getPath() : ""); + ClusterInfo.ReservedSpace reservedSpace = clusterInfo.getReservedSpace(node.nodeId(), usage != null ? usage.path() : ""); long totalSize = 0; for (ShardRouting shard : node.started()) { if (shard.getExpectedShardSize() > 0 diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 22bed76fa2b2e..952b2fbf8f2e2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -472,9 +472,9 @@ private static DiskUsageWithRelocations getDiskUsage( logger.debug( "unable to determine disk usage for {}, defaulting to average across nodes [{} total] [{} free] [{}% free]", node.nodeId(), - usage.getTotalBytes(), - usage.getFreeBytes(), - usage.getFreeDiskAsPercentage() + usage.totalBytes(), + usage.freeBytes(), + usage.freeDiskAsPercentage() ); } @@ -483,7 +483,7 @@ private static DiskUsageWithRelocations getDiskUsage( sizeOfUnaccountedShards( node, subtractLeavingShards, - usage.getPath(), + usage.path(), allocation.clusterInfo(), allocation.snapshotShardSizeInfo(), allocation.metadata(), @@ -509,8 +509,8 @@ static DiskUsage averageUsage(RoutingNode node, Map usages) { long totalBytes = 0; long freeBytes = 0; for (DiskUsage du : usages.values()) { - totalBytes += du.getTotalBytes(); - freeBytes += du.getFreeBytes(); + totalBytes += du.totalBytes(); + freeBytes += du.freeBytes(); } return new DiskUsage(node.nodeId(), node.node().getName(), "_na_", totalBytes / usages.size(), freeBytes / usages.size()); } @@ -548,18 +548,18 @@ record DiskUsageWithRelocations(DiskUsage diskUsage, long relocatingShardSize) { long getFreeBytes() { try { - return Math.subtractExact(diskUsage.getFreeBytes(), relocatingShardSize); + return Math.subtractExact(diskUsage.freeBytes(), relocatingShardSize); } catch (ArithmeticException e) { return Long.MAX_VALUE; } } String getPath() { - return diskUsage.getPath(); + return diskUsage.path(); } long getTotalBytes() { - return diskUsage.getTotalBytes(); + return diskUsage.totalBytes(); } } diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index 80c969cc1b084..67a9e23f2297f 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -69,6 +69,7 @@ public enum ReferenceDocs { BOOTSTRAP_CHECK_TLS, BOOTSTRAP_CHECK_TOKEN_SSL, BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP, + CONTACT_SUPPORT, // this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner ; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java index 905373f9400f6..8973ae6e9dd3a 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java @@ -32,7 +32,6 @@ public final class ReleasableBytesReference implements RefCounted, Releasable, B private final RefCounted refCounted; public static ReleasableBytesReference empty() { - 
EMPTY.incRef(); return EMPTY; } @@ -147,6 +146,9 @@ public StreamInput streamInput() throws IOException { assert hasReferences(); return new BytesReferenceStreamInput(this) { private ReleasableBytesReference retainAndSkip(int len) throws IOException { + if (len == 0) { + return ReleasableBytesReference.empty(); + } // instead of reading the bytes from a stream we just create a slice of the underlying bytes final ReleasableBytesReference result = retainedSlice(offset(), len); // move the stream manually since creating the slice didn't move it diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index d7774d5c0a7ea..e6865e5c66e74 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -329,6 +329,29 @@ public boolean offer(E e) { } } + // Overridden to workaround a JDK bug introduced in JDK 21.0.2 + // https://bugs.openjdk.org/browse/JDK-8323659 + @Override + public void put(E e) { + // As the queue is unbounded, this method will always add to the queue. + super.offer(e); + } + + // Overridden to workaround a JDK bug introduced in JDK 21.0.2 + // https://bugs.openjdk.org/browse/JDK-8323659 + @Override + public boolean add(E e) { + // As the queue is unbounded, this method will never return false. + return super.offer(e); + } + + // Overridden to workaround a JDK bug introduced in JDK 21.0.2 + // https://bugs.openjdk.org/browse/JDK-8323659 + @Override + public boolean offer(E e, long timeout, TimeUnit unit) { + // As the queue is unbounded, this method will never return false. + return super.offer(e); + } } /** diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionHandler.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionHandler.java index 3878a4a2dff9d..9457773eb8071 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionHandler.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionHandler.java @@ -9,6 +9,8 @@ package org.elasticsearch.common.util.concurrent; import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.MeterRegistry; import java.util.concurrent.RejectedExecutionHandler; import java.util.concurrent.ThreadPoolExecutor; @@ -16,6 +18,7 @@ public abstract class EsRejectedExecutionHandler implements RejectedExecutionHandler { private final CounterMetric rejected = new CounterMetric(); + private LongCounter rejectionCounter = null; /** * The number of rejected executions. 
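The queue overrides above work because this executor queue is unbounded: offer(e) can never return false, so put, add and the timed offer can all delegate to it and sidestep the LinkedTransferQueue regression tracked as JDK-8323659 in JDK 21.0.2. A self-contained sketch of the same pattern, with an illustrative class name rather than the Elasticsearch one:

import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.TimeUnit;

// For an unbounded queue, insertion cannot fail, so every blocking or timed
// insertion method can safely route through the non-blocking offer(e).
class UnboundedWorkQueue<E> extends LinkedTransferQueue<E> {
    @Override
    public void put(E e) {
        super.offer(e); // always succeeds; the blocking path is never taken
    }

    @Override
    public boolean add(E e) {
        return super.offer(e); // never returns false on an unbounded queue
    }

    @Override
    public boolean offer(E e, long timeout, TimeUnit unit) {
        return super.offer(e); // the timeout is irrelevant when insertion cannot fail
    }
}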
@@ -26,6 +29,14 @@ public long rejected() { protected void incrementRejections() { rejected.inc(); + if (rejectionCounter != null) { + rejectionCounter.increment(); + } + } + + public void registerCounter(MeterRegistry meterRegistry, String prefix, String name) { + rejectionCounter = meterRegistry.registerLongCounter(prefix + ".rejected.total", "number of rejected threads for " + name, "count"); + rejectionCounter.incrementBy(rejected()); } protected static EsRejectedExecutionException newRejectedException( diff --git a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java index ec315f5200978..5289ac57e10ca 100644 --- a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java @@ -40,6 +40,7 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.Executor; +import java.util.stream.Collectors; import static java.util.Collections.emptyList; import static org.elasticsearch.core.Strings.format; @@ -352,10 +353,17 @@ protected void startProbe(TransportAddress transportAddress) { } } + public Set getMastersOfPeers() { + synchronized (mutex) { + return peersByAddress.values().stream().flatMap(p -> p.lastKnownMasterNode.stream()).collect(Collectors.toSet()); + } + } + private class Peer { private final TransportAddress transportAddress; private final SetOnce probeConnectionResult = new SetOnce<>(); private volatile boolean peersRequestInFlight; + private Optional lastKnownMasterNode = Optional.empty(); Peer(TransportAddress transportAddress) { this.transportAddress = transportAddress; @@ -439,9 +447,20 @@ public void onResponse(ProbeConnectionResult connectResult) { @Override public void onFailure(Exception e) { if (verboseFailureLogging) { + + final String believedMasterBy; + synchronized (mutex) { + believedMasterBy = peersByAddress.values() + .stream() + .filter(p -> p.lastKnownMasterNode.map(DiscoveryNode::getAddress).equals(Optional.of(transportAddress))) + .findFirst() + .map(p -> " [current master according to " + p.getDiscoveryNode().descriptionWithoutAttributes() + "]") + .orElse(""); + } + if (logger.isDebugEnabled()) { // log message at level WARN, but since DEBUG logging is enabled we include the full stack trace - logger.warn(() -> format("%s discovery result", Peer.this), e); + logger.warn(() -> format("%s%s discovery result", Peer.this, believedMasterBy), e); } else { final StringBuilder messageBuilder = new StringBuilder(); Throwable cause = e; @@ -452,7 +471,7 @@ public void onFailure(Exception e) { final String message = messageBuilder.length() < 1024 ? 
messageBuilder.toString() : (messageBuilder.substring(0, 1023) + "..."); - logger.warn("{} discovery result{}", Peer.this, message); + logger.warn("{}{} discovery result{}", Peer.this, believedMasterBy, message); } } else { logger.debug(() -> format("%s discovery result", Peer.this), e); @@ -504,6 +523,7 @@ public void handleResponse(PeersResponse response) { return; } + lastKnownMasterNode = response.getMasterNode(); response.getMasterNode().ifPresent(node -> startProbe(node.getAddress())); for (DiscoveryNode node : response.getKnownPeers()) { startProbe(node.getAddress()); @@ -545,7 +565,13 @@ Releasable getConnectionReference() { @Override public String toString() { - return "address [" + transportAddress + "], node [" + getDiscoveryNode() + "], requesting [" + peersRequestInFlight + "]"; + return "address [" + + transportAddress + + "], node [" + + Optional.ofNullable(probeConnectionResult.get()) + .map(result -> result.getDiscoveryNode().descriptionWithoutAttributes()) + .orElse("unknown") + + "]" + + (peersRequestInFlight ? " [request in flight]" : ""); } } } diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java index 0415e0c90ba8a..94cd518051199 100644 --- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java +++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java @@ -432,24 +432,24 @@ DiskHealthInfo getHealth(HealthMetadata healthMetadata, ClusterState clusterStat return new DiskHealthInfo(HealthStatus.UNKNOWN, DiskHealthInfo.Cause.NODE_HAS_NO_DISK_STATS); } - ByteSizeValue totalBytes = ByteSizeValue.ofBytes(usage.getTotalBytes()); + ByteSizeValue totalBytes = ByteSizeValue.ofBytes(usage.totalBytes()); if (node.isDedicatedFrozenNode() || isDedicatedSearchNode(node)) { long frozenFloodStageThreshold = diskMetadata.getFreeBytesFrozenFloodStageWatermark(totalBytes).getBytes(); - if (usage.getFreeBytes() < frozenFloodStageThreshold) { + if (usage.freeBytes() < frozenFloodStageThreshold) { logger.debug("Flood stage disk watermark [{}] exceeded on {}", frozenFloodStageThreshold, usage); return new DiskHealthInfo(HealthStatus.RED, DiskHealthInfo.Cause.FROZEN_NODE_OVER_FLOOD_STAGE_THRESHOLD); } return new DiskHealthInfo(HealthStatus.GREEN); } long floodStageThreshold = diskMetadata.getFreeBytesFloodStageWatermark(totalBytes).getBytes(); - if (usage.getFreeBytes() < floodStageThreshold) { + if (usage.freeBytes() < floodStageThreshold) { logger.debug("Flood stage disk watermark [{}] exceeded on {}", floodStageThreshold, usage); return new DiskHealthInfo(HealthStatus.RED, DiskHealthInfo.Cause.NODE_OVER_THE_FLOOD_STAGE_THRESHOLD); } long highThreshold = diskMetadata.getFreeBytesHighWatermark(totalBytes).getBytes(); - if (usage.getFreeBytes() < highThreshold) { + if (usage.freeBytes() < highThreshold) { if (node.canContainData()) { // for data nodes only report YELLOW if shards can't move away from the node if (DiskCheck.hasRelocatingShards(clusterState, node) == false) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index 187d59a88e2fd..d5098e1021a1c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -128,7 +128,7 @@ protected Object parseSourceValue(Object value) { }; }
- public ValueFetcher valueFetcher(Set sourcePaths, Object nullValue, String format) { + public ValueFetcher valueFetcher(Set sourcePaths, T nullValue, String format) { Function, List> formatter = getFormatter(format != null ? format : GeometryFormatterFactory.GEOJSON); return new ArraySourceValueFetcher(sourcePaths, nullValueAsSource(nullValue)) { @Override @@ -140,7 +140,15 @@ protected Object parseSourceValue(Object value) { }; } - protected abstract Object nullValueAsSource(Object nullValue); + @Override + public BlockLoader blockLoader(BlockLoaderContext blContext) { + // Currently we can only load from source in ESQL + ValueFetcher fetcher = valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKB); + // TODO consider optimization using BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) + return new BlockSourceReader.GeometriesBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll()); + } + + protected abstract Object nullValueAsSource(T nullValue); } private final Explicit ignoreMalformed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java index 031b67c263505..be6e00d5c7b45 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.TriFunction; -import org.elasticsearch.common.geo.GeometryFormatterFactory; import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; @@ -174,20 +173,8 @@ protected AbstractPointFieldType( } @Override - protected Object nullValueAsSource(Object nullValue) { - if (nullValue == null) { - return null; - } - SpatialPoint point = (SpatialPoint) nullValue; - return point.toWKT(); - } - - @Override - public BlockLoader blockLoader(BlockLoaderContext blContext) { - // Currently we can only load from source in ESQL - ValueFetcher fetcher = valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKB); - // TODO consider optimization using BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) - return new BlockSourceReader.GeometriesBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll()); + protected Object nullValueAsSource(T nullValue) { + return nullValue == null ? 
null : nullValue.toWKT(); } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java index c18c4db955a43..56f1faeb38a5b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java @@ -64,14 +64,8 @@ public Orientation orientation() { } @Override - public BlockLoader blockLoader(BlockLoaderContext blContext) { - // TODO: Support shapes in ESQL - return null; - } - - @Override - protected Object nullValueAsSource(Object nullValue) { - // TODO: When we support shapes in ESQL; we need to return a shape in source format here + protected Object nullValueAsSource(T nullValue) { + // we don't support null values for shapes + return nullValue; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 27424d4591ba6..f165361ded105 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -29,6 +29,15 @@ import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.queries.function.FunctionQuery; +import org.apache.lucene.queries.function.valuesource.ByteKnnVectorFieldSource; +import org.apache.lucene.queries.function.valuesource.ByteVectorSimilarityFunction; +import org.apache.lucene.queries.function.valuesource.ConstKnnByteVectorValueSource; +import org.apache.lucene.queries.function.valuesource.ConstKnnFloatValueSource; +import org.apache.lucene.queries.function.valuesource.FloatKnnVectorFieldSource; +import org.apache.lucene.queries.function.valuesource.FloatVectorSimilarityFunction; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.KnnByteVectorQuery; import org.apache.lucene.search.Query; @@ -1063,6 +1072,67 @@ public Query createKnnQuery( return knnQuery; } + public Query createExactKnnQuery(float[] queryVector) { + if (isIndexed() == false) { + throw new IllegalArgumentException( + "to perform knn search on field [" + name() + "], its mapping must have [index] set to [true]" + ); + } + if (queryVector.length != dims) { + throw new IllegalArgumentException( + "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" + ); + } + elementType.checkVectorBounds(queryVector); + if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { + float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); + elementType.checkVectorMagnitude(similarity, ElementType.errorFloatElementsAppender(queryVector), squaredMagnitude); + if (similarity == VectorSimilarity.COSINE + && ElementType.FLOAT.equals(elementType) + && indexVersionCreated.onOrAfter(NORMALIZE_COSINE) + && isNotUnitVector(squaredMagnitude)) { + float length = (float) Math.sqrt(squaredMagnitude); + queryVector = Arrays.copyOf(queryVector, queryVector.length); + for (int i = 0; i < queryVector.length; i++) { + queryVector[i] /= length; + } + } + } +
VectorSimilarityFunction vectorSimilarityFunction = similarity.vectorSimilarityFunction(indexVersionCreated, elementType); + return switch (elementType) { + case BYTE -> { + byte[] bytes = new byte[queryVector.length]; + for (int i = 0; i < queryVector.length; i++) { + bytes[i] = (byte) queryVector[i]; + } + yield new BooleanQuery.Builder().add(new FieldExistsQuery(name()), BooleanClause.Occur.FILTER) + .add( + new FunctionQuery( + new ByteVectorSimilarityFunction( + vectorSimilarityFunction, + new ByteKnnVectorFieldSource(name()), + new ConstKnnByteVectorValueSource(bytes) + ) + ), + BooleanClause.Occur.SHOULD + ) + .build(); + } + case FLOAT -> new BooleanQuery.Builder().add(new FieldExistsQuery(name()), BooleanClause.Occur.FILTER) + .add( + new FunctionQuery( + new FloatVectorSimilarityFunction( + vectorSimilarityFunction, + new FloatKnnVectorFieldSource(name()), + new ConstKnnFloatValueSource(queryVector) + ) + ), + BooleanClause.Occur.SHOULD + ) + .build(); + }; + } + public Query createKnnQuery( float[] queryVector, int numCands, @@ -1082,7 +1152,6 @@ public Query createKnnQuery( ); } elementType.checkVectorBounds(queryVector); - if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); elementType.checkVectorMagnitude(similarity, ElementType.errorFloatElementsAppender(queryVector), squaredMagnitude); @@ -1110,6 +1179,7 @@ && isNotUnitVector(squaredMagnitude)) { case FLOAT -> parentFilter != null ? new ProfilingDiversifyingChildrenFloatKnnVectorQuery(name(), queryVector, filter, numCands, parentFilter) : new ProfilingKnnFloatVectorQuery(name(), queryVector, numCands, filter); + }; if (similarityThreshold != null) { diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index f86142ffbe862..d3d7b46d3d729 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -296,6 +296,10 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws if (queryRewriteContext == null) { return this; } + final InnerHitsRewriteContext ihrc = queryRewriteContext.convertToInnerHitsRewriteContext(); + if (ihrc != null) { + return doInnerHitsRewrite(ihrc); + } final CoordinatorRewriteContext crc = queryRewriteContext.convertToCoordinatorRewriteContext(); if (crc != null) { return doCoordinatorRewrite(crc); @@ -342,6 +346,16 @@ protected QueryBuilder doIndexMetadataRewrite(final QueryRewriteContext context) return this; } + /** + * Optional rewrite logic that allows for optimization for extracting inner hits + * @param context an {@link InnerHitsRewriteContext} instance + * @return A {@link QueryBuilder} representing the rewritten query optimized for inner hit extraction + * @throws IOException if an error occurs while rewriting the query + */ + protected QueryBuilder doInnerHitsRewrite(final InnerHitsRewriteContext context) throws IOException { + return this; + } + /** * For internal usage only! 
* diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitsRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitsRewriteContext.java new file mode 100644 index 0000000000000..0b437fa451e1b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitsRewriteContext.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.index.query; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xcontent.XContentParserConfiguration; + +import java.util.function.LongSupplier; + +/** + * Context object used to rewrite {@link QueryBuilder} instances into an optimized version for extracting inner_hits. + */ +public final class InnerHitsRewriteContext extends QueryRewriteContext { + public InnerHitsRewriteContext(final XContentParserConfiguration parserConfiguration, final LongSupplier nowInMillis) { + super(parserConfiguration, null, nowInMillis); + } + + @Override + public InnerHitsRewriteContext convertToInnerHitsRewriteContext() { + return this; + } + + @Override + @SuppressWarnings({ "rawtypes" }) + public void executeAsyncActions(ActionListener listener) { + // InnerHitsRewriteContext does not support async actions at all, and doesn't supply a valid `client` object + throw new UnsupportedOperationException("InnerHitsRewriteContext does not support async actions"); + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java index 04ae0bb498841..47e4cf7273703 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java @@ -39,14 +39,14 @@ public MatchNoneQueryBuilder(String rewriteReason) { */ public MatchNoneQueryBuilder(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { rewriteReason = in.readOptionalString(); } } @Override protected void doWriteTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeOptionalString(rewriteReason); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index 9a8800c05bdb2..e36c4d608d59f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -159,6 +159,10 @@ public DataRewriteContext convertToDataRewriteContext() { return null; } + public InnerHitsRewriteContext convertToInnerHitsRewriteContext() { + return null; + } + /** * Returns the {@link MappedFieldType} for the provided field name. * If the field is not mapped, the behaviour depends on the index.query.parse.allow_unmapped_fields setting, which defaults to true. 
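InnerHitsRewriteContext follows the same convert-and-dispatch pattern as the coordinator and data rewrite contexts: the base QueryRewriteContext returns null from every convertToXxxRewriteContext() method, each concrete context overrides only its own converter to return this, and doRewrite probes the converters in priority order. A condensed, runnable illustration of the pattern with made-up names (not the actual class hierarchy):

    // Condensed illustration of the convert-and-dispatch rewrite pattern (names are hypothetical).
    class RewriteDispatchDemo {
        static class Context {
            InnerHitsCtx asInnerHits() { return null; }      // base: "I am not that context"
        }

        static class InnerHitsCtx extends Context {
            @Override
            InnerHitsCtx asInnerHits() { return this; }      // self-identifies
        }

        static String rewrite(Context ctx) {
            InnerHitsCtx ih = ctx.asInnerHits();
            return ih != null ? "inner-hits rewrite" : "generic rewrite";
        }

        public static void main(String[] args) {
            System.out.println(rewrite(new Context()));      // generic rewrite
            System.out.println(rewrite(new InnerHitsCtx())); // inner-hits rewrite
        }
    }

The payoff is that query builders get a specialized hook (doInnerHitsRewrite) without instanceof checks scattered across every builder.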
diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
index 5a2b01838e27b..63cd598caa784 100644
--- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
@@ -91,7 +91,7 @@ public final class SimpleQueryStringBuilder extends AbstractQueryBuilder searchFailures;
 private boolean timedOut;

-    private static final String TOOK_FIELD = "took";
-    private static final String TIMED_OUT_FIELD = "timed_out";
-    private static final String FAILURES_FIELD = "failures";
-
-    @SuppressWarnings("unchecked")
-    private static final ObjectParser<BulkByScrollResponseBuilder, Void> PARSER = new ObjectParser<>(
-        "bulk_by_scroll_response",
-        true,
-        BulkByScrollResponseBuilder::new
-    );
-    static {
-        PARSER.declareLong(BulkByScrollResponseBuilder::setTook, new ParseField(TOOK_FIELD));
-        PARSER.declareBoolean(BulkByScrollResponseBuilder::setTimedOut, new ParseField(TIMED_OUT_FIELD));
-        PARSER.declareObjectArray(BulkByScrollResponseBuilder::setFailures, (p, c) -> parseFailure(p), new ParseField(FAILURES_FIELD));
-        // since the result of BulkByScrollResponse.Status are mixed we also parse that in this
-        Status.declareFields(PARSER);
-    }
+    static final String TOOK_FIELD = "took";
+    static final String TIMED_OUT_FIELD = "timed_out";
+    static final String FAILURES_FIELD = "failures";

 public BulkByScrollResponse(StreamInput in) throws IOException {
     super(in);
@@ -195,7 +171,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         builder.field(TOOK_FIELD, took.millis());
         builder.field(TIMED_OUT_FIELD, timedOut);
         status.innerXContent(builder, params);
-        builder.startArray("failures");
+        builder.startArray(FAILURES_FIELD);
         for (Failure failure : bulkFailures) {
             builder.startObject();
             failure.toXContent(builder, params);
@@ -208,59 +184,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return builder;
     }

-    public static BulkByScrollResponse fromXContent(XContentParser parser) {
-        return PARSER.apply(parser, null).buildResponse();
-    }
-
-    private static Object parseFailure(XContentParser parser) throws IOException {
-        ensureExpectedToken(Token.START_OBJECT, parser.currentToken(), parser);
-        Token token;
-        String index = null;
-        String id = null;
-        Integer status = null;
-        Integer shardId = null;
-        String nodeId = null;
-        ElasticsearchException bulkExc = null;
-        ElasticsearchException searchExc = null;
-        while ((token = parser.nextToken()) != Token.END_OBJECT) {
-            ensureExpectedToken(Token.FIELD_NAME, token, parser);
-            String name = parser.currentName();
-            token = parser.nextToken();
-            if (token == Token.START_ARRAY) {
-                parser.skipChildren();
-            } else if (token == Token.START_OBJECT) {
-                switch (name) {
-                    case SearchFailure.REASON_FIELD -> searchExc = ElasticsearchException.fromXContent(parser);
-                    case Failure.CAUSE_FIELD -> bulkExc = ElasticsearchException.fromXContent(parser);
-                    default -> parser.skipChildren();
-                }
-            } else if (token == Token.VALUE_STRING) {
-                switch (name) {
-                    // This field is the same as SearchFailure.index
-                    case Failure.INDEX_FIELD -> index = parser.text();
-                    case Failure.ID_FIELD -> id = parser.text();
-                    case SearchFailure.NODE_FIELD -> nodeId = parser.text();
-                }
-            } else if (token == Token.VALUE_NUMBER) {
-                switch (name) {
-                    case Failure.STATUS_FIELD -> status = parser.intValue();
-                    case SearchFailure.SHARD_FIELD -> shardId =
parser.intValue();
-                }
-            }
-        }
-        if (bulkExc != null) {
-            return new Failure(index, id, bulkExc, RestStatus.fromCode(status));
-        } else if (searchExc != null) {
-            if (status == null) {
-                return new SearchFailure(searchExc, index, shardId, nodeId);
-            } else {
-                return new SearchFailure(searchExc, index, shardId, nodeId, RestStatus.fromCode(status));
-            }
-        } else {
-            throw new ElasticsearchParseException("failed to parse failures array. At least one of {reason,cause} must be present");
-        }
-    }
-
     @Override
     public String toString() {
         StringBuilder builder = new StringBuilder();
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java
index 5db1732fc1590..42cf8a185ec7a 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java
@@ -157,8 +157,8 @@ private static class ClientHit implements Hit {
         private final BytesReference source;

         ClientHit(SearchHit delegate) {
-            this.delegate = delegate;
-            source = delegate.hasSource() ? delegate.getSourceRef() : null;
+            this.delegate = delegate.asUnpooled(); // TODO: use pooled version here
+            source = this.delegate.hasSource() ? this.delegate.getSourceRef() : null;
         }

         @Override
diff --git a/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java b/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java
index 807844d983135..8874c43c919ca 100644
--- a/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java
+++ b/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java
@@ -37,17 +37,23 @@ public class NodeMetrics extends AbstractLifecycleComponent {
     private final NodeService nodeService;
     private final List<AutoCloseable> metrics;
     private NodeStatsCache stats;
+    private final TimeValue cacheExpiry;

     /**
      * Constructs a new NodeMetrics instance.
      *
-     * @param meterRegistry The MeterRegistry used to register metrics.
-     * @param nodeService   The NodeService for interacting with the Elasticsearch node and extracting statistics.
-     */
-    public NodeMetrics(MeterRegistry meterRegistry, NodeService nodeService) {
+     * @param meterRegistry   The MeterRegistry used to register metrics.
+     * @param nodeService     The NodeService for interacting with the Elasticsearch node and extracting statistics.
+     * @param metricsInterval The interval at which the agent sends metrics to the APM Server
+     */
+    public NodeMetrics(MeterRegistry meterRegistry, NodeService nodeService, TimeValue metricsInterval) {
         this.registry = meterRegistry;
         this.nodeService = nodeService;
         this.metrics = new ArrayList<>(17);
+        // we set the cache to expire after half the interval at which the agent sends
+        // metrics to the APM Server, so that the cache does not refresh within a single
+        // poll period but has expired by the time the next poll period starts
+        this.cacheExpiry = new TimeValue(metricsInterval.getMillis() / 2);
     }

     /**
@@ -57,10 +63,7 @@ public NodeMetrics(MeterRegistry meterRegistry, NodeService nodeService) {
      * @param registry The MeterRegistry used to register and collect metrics.
      */
     private void registerAsyncMetrics(MeterRegistry registry) {
-        // Agent should poll stats every 4 minutes and being this cache is lazy we need a
-        // number high enough so that the cache does not update during the same poll
-        // period and that expires before a new poll period, therefore we choose 1 minute.
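The half-interval choice is simple arithmetic: with the 10s default for metricsInterval (see NodeConstruction below), the cache expires after 5s, so two successive agent polls at t=0 and t=10s can never be served from the same cached NodeStats, while repeated reads within one poll still hit the cache. A hedged sketch of such a lazily refreshing cache (a hypothetical helper, not the actual NodeStatsCache):

    import java.time.Duration;
    import java.util.function.Supplier;

    // Minimal time-based lazy cache: refreshes at most once per expiry window.
    final class CachedSupplier<T> {
        private final Supplier<T> loader;
        private final long expiryNanos;
        private T value;
        private long loadedAtNanos;

        CachedSupplier(Supplier<T> loader, Duration expiry) {
            this.loader = loader;
            this.expiryNanos = expiry.toNanos();
        }

        synchronized T get() {
            long now = System.nanoTime();
            if (value == null || now - loadedAtNanos >= expiryNanos) {
                value = loader.get();       // refresh only once the window has elapsed
                loadedAtNanos = now;
            }
            return value;
        }
    }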
- this.stats = new NodeStatsCache(TimeValue.timeValueMinutes(1)); + this.stats = new NodeStatsCache(cacheExpiry); metrics.add( registry.registerLongAsyncCounter( "es.indices.get.total", diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index aa62ea689a5a9..1dae328752bdc 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -78,6 +78,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; @@ -183,6 +184,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -241,8 +243,8 @@ static NodeConstruction prepareConstruction( NodeConstruction constructor = new NodeConstruction(closeables); Settings settings = constructor.createEnvironment(initialEnvironment, serviceProvider); - - ThreadPool threadPool = constructor.createThreadPool(settings); + TelemetryProvider telemetryProvider = constructor.createTelemetryProvider(settings); + ThreadPool threadPool = constructor.createThreadPool(settings, telemetryProvider.getMeterRegistry()); SettingsModule settingsModule = constructor.validateSettings(initialEnvironment.settings(), settings, threadPool); SearchModule searchModule = constructor.createSearchModule(settingsModule.getSettings(), threadPool); @@ -257,7 +259,8 @@ static NodeConstruction prepareConstruction( scriptService, constructor.createAnalysisRegistry(), serviceProvider, - forbidPrivateIndexSettings + forbidPrivateIndexSettings, + telemetryProvider ); return constructor; @@ -448,9 +451,14 @@ private Settings createEnvironment(Environment initialEnvironment, NodeServicePr return settings; } - private ThreadPool createThreadPool(Settings settings) throws IOException { + private TelemetryProvider createTelemetryProvider(Settings settings) { + return getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)).orElse(TelemetryProvider.NOOP); + } + + private ThreadPool createThreadPool(Settings settings, MeterRegistry meterRegistry) throws IOException { ThreadPool threadPool = new ThreadPool( settings, + meterRegistry, pluginsService.flatMap(p -> p.getExecutorBuilders(settings)).toArray(ExecutorBuilder[]::new) ); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); @@ -580,13 +588,12 @@ private void construct( ScriptService scriptService, AnalysisRegistry analysisRegistry, NodeServiceProvider serviceProvider, - boolean forbidPrivateIndexSettings + boolean forbidPrivateIndexSettings, + TelemetryProvider telemetryProvider ) throws IOException { Settings settings = settingsModule.getSettings(); - TelemetryProvider telemetryProvider = getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)) - .orElse(TelemetryProvider.NOOP); modules.bindToInstance(Tracer.class, telemetryProvider.getTracer()); TaskManager taskManager = new TaskManager( @@ -598,6 +605,7 @@ private void construct( 
).collect(Collectors.toSet()),
             telemetryProvider.getTracer()
         );
+        final Tracer tracer = telemetryProvider.getTracer();

         ClusterService clusterService = createClusterService(settingsModule, threadPool, taskManager);
         clusterService.addStateApplier(scriptService);
@@ -966,7 +974,8 @@ record PluginServiceInstances(
             repositoryService
         );

-        final NodeMetrics nodeMetrics = new NodeMetrics(telemetryProvider.getMeterRegistry(), nodeService);
+        final TimeValue metricsInterval = settings.getAsTime("tracing.apm.agent.metrics_interval", TimeValue.timeValueSeconds(10));
+        final NodeMetrics nodeMetrics = new NodeMetrics(telemetryProvider.getMeterRegistry(), nodeService, metricsInterval);

         final SearchService searchService = serviceProvider.newSearchService(
             pluginsService,
diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java
index 32188d55e418a..63c97685c913e 100644
--- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java
+++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java
@@ -10,7 +10,7 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
+import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterStateListener;
 import org.elasticsearch.common.Strings;
@@ -300,9 +300,9 @@ private void cancelTask(Long allocationId) {
         if (task.markAsCancelled()) {
             // Cancel the local task using the task manager
             String reason = "task has been removed, cancelling locally";
-            persistentTasksService.sendCancelRequest(task.getId(), reason, new ActionListener<CancelTasksResponse>() {
+            persistentTasksService.sendCancelRequest(task.getId(), reason, new ActionListener<>() {
                 @Override
-                public void onResponse(CancelTasksResponse cancelTasksResponse) {
+                public void onResponse(ListTasksResponse cancelTasksResponse) {
                     logger.trace(
                         "Persistent task [{}] with id [{}] and allocation id [{}] was cancelled",
                         task.getAction(),
diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java
index 869a93110d257..227569341919a 100644
--- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java
+++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java
@@ -13,7 +13,7 @@
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
-import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
+import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.client.internal.OriginSettingClient;
 import org.elasticsearch.cluster.ClusterState;
@@ -90,7 +90,7 @@ public void sendCompletionRequest(
     /**
      * Cancels a locally running task using the Task Manager API
      */
-    void sendCancelRequest(final long taskId, final String reason, final ActionListener<CancelTasksResponse> listener) {
+    void sendCancelRequest(final long taskId, final String reason, final ActionListener<ListTasksResponse> listener) {
         CancelTasksRequest request = new CancelTasksRequest();
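With CancelTasksResponse swapped for ListTasksResponse, callers of sendCancelRequest see the cancelled tasks through the generic list-tasks shape; only the listener's type parameter changes. A hypothetical call site (the logging bodies are placeholders, not code from this PR):

    // Hypothetical caller of the updated API; only the generic type differs from before.
    persistentTasksService.sendCancelRequest(taskId, "task has been removed, cancelling locally", new ActionListener<ListTasksResponse>() {
        @Override
        public void onResponse(ListTasksResponse response) {
            // same payload as before, now typed as the generic list-tasks response
        }

        @Override
        public void onFailure(Exception e) {
            // cancellation failed; log or retry
        }
    });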
request.setTargetTaskId(new TaskId(clusterService.localNode().getId(), taskId)); request.setReason(reason); diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java index 722779a646824..b9cce9e3ec500 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java @@ -28,7 +28,7 @@ public class RepositoriesStats implements Writeable, ToXContentFragment { private final Map repositoryThrottlingStats; public RepositoriesStats(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { repositoryThrottlingStats = in.readMap(ThrottlingStats::new); } else { repositoryThrottlingStats = new HashMap<>(); @@ -41,7 +41,7 @@ public RepositoriesStats(Map repositoryThrottlingStats) @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeMap(repositoryThrottlingStats, StreamOutput::writeWriteable); } } diff --git a/server/src/main/java/org/elasticsearch/rest/RestResponse.java b/server/src/main/java/org/elasticsearch/rest/RestResponse.java index 55adc67bf18e6..2bb7bdc41bcf9 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestResponse.java +++ b/server/src/main/java/org/elasticsearch/rest/RestResponse.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -21,7 +20,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; @@ -33,14 +31,13 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.rest.RestController.ELASTIC_PRODUCT_HTTP_HEADER; public final class RestResponse { public static final String TEXT_CONTENT_TYPE = "text/plain; charset=UTF-8"; - private static final String STATUS = "status"; + static final String STATUS = "status"; private static final Logger SUPPRESSED_ERROR_LOGGER = LogManager.getLogger("rest.suppressed"); @@ -189,42 +186,6 @@ static RestResponse createSimpleErrorResponse(RestChannel channel, RestStatus st ); } - public static ElasticsearchStatusException errorFromXContent(XContentParser parser) throws IOException { - XContentParser.Token token = parser.nextToken(); - ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); - - ElasticsearchException exception = null; - RestStatus status = null; - - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } - if (STATUS.equals(currentFieldName)) { - if (token != 
XContentParser.Token.FIELD_NAME) {
-                    ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, token, parser);
-                    status = RestStatus.fromCode(parser.intValue());
-                }
-            } else {
-                exception = ElasticsearchException.failureFromXContent(parser);
-            }
-        }
-
-        if (exception == null) {
-            throw new IllegalStateException("Failed to parse elasticsearch status exception: no exception was found");
-        }
-
-        ElasticsearchStatusException result = new ElasticsearchStatusException(exception.getMessage(), status, exception.getCause());
-        for (String header : exception.getHeaderKeys()) {
-            result.addHeader(header, exception.getHeader(header));
-        }
-        for (String metadata : exception.getMetadataKeys()) {
-            result.addMetadata(metadata, exception.getMetadata(metadata));
-        }
-        return result;
-    }
-
     public void copyHeaders(ElasticsearchException ex) {
         Set<String> headerKeySet = ex.getHeaderKeys();
         if (customHeaders == null) {
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java
index 4c9ac8fcb9a3c..815c3ce7e2c33 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java
@@ -11,9 +11,9 @@
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction;
 import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
-import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.SubscribableListener;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.rest.BaseRestHandler;
@@ -65,7 +65,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
         if (validationException != null) {
             throw validationException;
         }
-        final var responseListener = new SubscribableListener<ForceMergeResponse>();
+        final var responseListener = new SubscribableListener<BroadcastResponse>();
         final var task = client.executeLocally(ForceMergeAction.INSTANCE, mergeRequest, responseListener);
         responseListener.addListener(new LoggingTaskListener<>(task));
         return sendTask(client.getLocalNodeId(), task);
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java
index cf238d57c4cab..97964b09593f5 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java
@@ -9,8 +9,8 @@
 package org.elasticsearch.rest.action.admin.indices;

 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
-import org.elasticsearch.action.admin.indices.flush.FlushResponse;
 import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.core.RestApiVersion;
@@ -55,14 +55,14 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
         return channel -> client.admin().indices().flush(flushRequest, new
SimulateSyncedFlushResponseListener(channel));
     }

-    static final class SimulateSyncedFlushResponseListener extends RestBuilderListener<FlushResponse> {
+    static final class SimulateSyncedFlushResponseListener extends RestBuilderListener<BroadcastResponse> {

         SimulateSyncedFlushResponseListener(RestChannel channel) {
             super(channel);
         }

         @Override
-        public RestResponse buildResponse(FlushResponse flushResponse, XContentBuilder builder) throws Exception {
+        public RestResponse buildResponse(BroadcastResponse flushResponse, XContentBuilder builder) throws Exception {
             builder.startObject();
             buildSyncedFlushResponse(builder, flushResponse);
             builder.endObject();
@@ -70,7 +70,7 @@ public RestResponse buildResponse(FlushResponse flushResponse, XContentBuilder b
             return new RestResponse(restStatus, builder);
         }

-        private static void buildSyncedFlushResponse(XContentBuilder builder, FlushResponse flushResponse) throws IOException {
+        private static void buildSyncedFlushResponse(XContentBuilder builder, BroadcastResponse flushResponse) throws IOException {
             builder.startObject("_shards");
             builder.field("total", flushResponse.getTotalShards());
             builder.field("successful", flushResponse.getSuccessfulShards());
diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java
index d2fc20ab83269..dde044bf15115 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchHit.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java
@@ -13,6 +13,7 @@
 import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressorFactory;
 import org.elasticsearch.common.document.DocumentField;
@@ -24,7 +25,9 @@
 import org.elasticsearch.common.xcontent.ChunkedToXContent;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.core.AbstractRefCounted;
 import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.RefCounted;
 import org.elasticsearch.core.RestApiVersion;
 import org.elasticsearch.index.mapper.IgnoredFieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
@@ -35,6 +38,7 @@
 import org.elasticsearch.search.fetch.subphase.LookupField;
 import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
 import org.elasticsearch.search.lookup.Source;
+import org.elasticsearch.transport.LeakTracker;
 import org.elasticsearch.transport.RemoteClusterAware;
 import org.elasticsearch.xcontent.ConstructingObjectParser;
 import org.elasticsearch.xcontent.ObjectParser;
@@ -55,6 +59,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.stream.Collectors;

 import static java.util.Collections.emptyMap;
 import static java.util.Collections.unmodifiableMap;
@@ -70,7 +75,7 @@
  *
  * @see SearchHits
  */
-public final class SearchHit implements Writeable, ToXContentObject {
+public final class SearchHit implements Writeable, ToXContentObject, RefCounted {

     private final transient int docId;
@@ -114,6 +119,8 @@ public final class SearchHit implements Writeable, ToXContentObject {

     private Map<String, SearchHits> innerHits;

+    private final RefCounted refCounted;
+
     // used only in tests
     public SearchHit(int docId) {
         this(docId, null);
@@ -124,6 +131,10 @@ public SearchHit(int docId, String id) {
     }

     public SearchHit(int nestedTopDocId, String
id, NestedIdentity nestedIdentity) { + this(nestedTopDocId, id, nestedIdentity, null); + } + + private SearchHit(int nestedTopDocId, String id, NestedIdentity nestedIdentity, @Nullable RefCounted refCounted) { this( nestedTopDocId, DEFAULT_SCORE, @@ -142,8 +153,10 @@ public SearchHit(int nestedTopDocId, String id, NestedIdentity nestedIdentity) { null, null, null, + null, new HashMap<>(), - new HashMap<>() + new HashMap<>(), + refCounted ); } @@ -164,9 +177,11 @@ public SearchHit( SearchShardTarget shard, String index, String clusterAlias, + Map sourceAsMap, Map innerHits, Map documentFields, - Map metaFields + Map metaFields, + @Nullable RefCounted refCounted ) { this.docId = docId; this.score = score; @@ -184,12 +199,28 @@ public SearchHit( this.shard = shard; this.index = index; this.clusterAlias = clusterAlias; + this.sourceAsMap = sourceAsMap; this.innerHits = innerHits; this.documentFields = documentFields; this.metaFields = metaFields; + this.refCounted = refCounted == null ? LeakTracker.wrap(new AbstractRefCounted() { + @Override + protected void closeInternal() { + if (SearchHit.this.innerHits != null) { + for (SearchHits h : SearchHit.this.innerHits.values()) { + h.decRef(); + } + SearchHit.this.innerHits = null; + } + if (SearchHit.this.source instanceof RefCounted r) { + r.decRef(); + } + SearchHit.this.source = null; + } + }) : ALWAYS_REFERENCED; } - public static SearchHit readFrom(StreamInput in) throws IOException { + public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOException { final float score = in.readFloat(); final int rank; if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { @@ -205,7 +236,7 @@ public static SearchHit readFrom(StreamInput in) throws IOException { final long version = in.readLong(); final long seqNo = in.readZLong(); final long primaryTerm = in.readVLong(); - BytesReference source = in.readBytesReference(); + BytesReference source = pooled ? in.readReleasableBytesReference() : in.readBytesReference(); if (source.length() == 0) { source = null; } @@ -244,7 +275,7 @@ public static SearchHit readFrom(StreamInput in) throws IOException { if (size > 0) { innerHits = Maps.newMapWithExpectedSize(size); for (int i = 0; i < size; i++) { - innerHits.put(in.readString(), new SearchHits(in)); + innerHits.put(in.readString(), SearchHits.readFrom(in, pooled)); } } else { innerHits = null; @@ -266,16 +297,31 @@ public static SearchHit readFrom(StreamInput in) throws IOException { shardTarget, index, clusterAlias, + null, innerHits, documentFields, - metaFields + metaFields, + pooled ? null : ALWAYS_REFERENCED ); } + public static SearchHit unpooled(int docId) { + return unpooled(docId, null); + } + + public static SearchHit unpooled(int docId, String id) { + return unpooled(docId, id, null); + } + + public static SearchHit unpooled(int nestedTopDocId, String id, NestedIdentity nestedIdentity) { + return new SearchHit(nestedTopDocId, id, nestedIdentity, ALWAYS_REFERENCED); + } + private static final Text SINGLE_MAPPING_TYPE = new Text(MapperService.SINGLE_MAPPING_NAME); @Override public void writeTo(StreamOutput out) throws IOException { + assert hasReferences(); out.writeFloat(score); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeVInt(rank); @@ -401,6 +447,7 @@ public NestedIdentity getNestedIdentity() { * Returns bytes reference, also uncompress the source if needed. 
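The refCounted plumbing gives each pooled SearchHit an explicit lifecycle: closeInternal releases the inner hits and the pooled source bytes on the last decRef, LeakTracker flags hits that are garbage-collected while still referenced, and ALWAYS_REFERENCED opts unpooled hits out entirely. The shape of the pattern, reduced to its essentials (an illustrative stand-in, not the ES classes):

    import java.util.concurrent.atomic.AtomicInteger;

    // Sketch of the pooled-resource pattern: count references, release children on the final decRef.
    final class PooledThing implements AutoCloseable {
        private final AtomicInteger refs = new AtomicInteger(1); // owner holds the first reference
        private byte[] pooledBuffer = new byte[1024];            // stands in for pooled source bytes

        void incRef() {
            // simplified: real implementations also guard against resurrecting a closed object
            if (refs.getAndIncrement() <= 0) {
                throw new IllegalStateException("already closed");
            }
        }

        boolean decRef() {
            if (refs.decrementAndGet() == 0) {
                pooledBuffer = null; // return resources to the pool on the final release
                return true;
            }
            return false;
        }

        @Override
        public void close() {
            decRef();
        }
    }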
*/ public BytesReference getSourceRef() { + assert hasReferences(); if (this.source == null) { return null; } @@ -427,6 +474,7 @@ public SearchHit sourceRef(BytesReference source) { * {@code _source} or if source is disabled in the mapping. */ public boolean hasSource() { + assert hasReferences(); return source != null; } @@ -434,6 +482,7 @@ public boolean hasSource() { * The source of the document as string (can be {@code null}). */ public String getSourceAsString() { + assert hasReferences(); if (source == null) { return null; } @@ -448,6 +497,7 @@ public String getSourceAsString() { * The source of the document as a map (can be {@code null}). */ public Map getSourceAsMap() { + assert hasReferences(); if (source == null) { return null; } @@ -463,6 +513,7 @@ public Map getSourceAsMap() { * The hit field matching the given field name. */ public DocumentField field(String fieldName) { + assert hasReferences(); DocumentField result = documentFields.get(fieldName); if (result != null) { return result; @@ -653,13 +704,72 @@ public Map getMatchedQueriesAndScores() { * @return Inner hits or null if there are none */ public Map getInnerHits() { + assert hasReferences(); return innerHits; } public void setInnerHits(Map innerHits) { + assert innerHits == null || innerHits.values().stream().noneMatch(h -> h.hasReferences() == false); + assert this.innerHits == null; this.innerHits = innerHits; } + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + + public SearchHit asUnpooled() { + assert hasReferences(); + if (isPooled() == false) { + return this; + } + return new SearchHit( + docId, + score, + rank, + id, + nestedIdentity, + version, + seqNo, + primaryTerm, + source instanceof RefCounted ? new BytesArray(source.toBytesRef(), true) : source, + highlightFields, + sortValues, + matchedQueries, + explanation, + shard, + index, + clusterAlias, + sourceAsMap, + innerHits == null + ? null + : innerHits.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().asUnpooled())), + documentFields, + metaFields, + ALWAYS_REFERENCED + ); + } + + public boolean isPooled() { + return refCounted != ALWAYS_REFERENCED; + } + public static class Fields { static final String _INDEX = "_index"; static final String _ID = "_id"; @@ -690,6 +800,7 @@ public static class Fields { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + assert hasReferences(); builder.startObject(); toInnerXContent(builder, params); builder.endObject(); @@ -972,9 +1083,11 @@ public static SearchHit createFromMap(Map values) { shardTarget, index, clusterAlias, + null, get(Fields.INNER_HITS, values, null), get(DOCUMENT_FIELDS, values, Collections.emptyMap()), - get(METADATA_FIELDS, values, Collections.emptyMap()) + get(METADATA_FIELDS, values, Collections.emptyMap()), + ALWAYS_REFERENCED // TODO: do we ever want pooling here? 
); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHits.java b/server/src/main/java/org/elasticsearch/search/SearchHits.java index c689f928954d2..a5c9425ba754c 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHits.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHits.java @@ -18,8 +18,11 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; @@ -32,7 +35,7 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -public final class SearchHits implements Writeable, ChunkedToXContent, Iterable { +public final class SearchHits implements Writeable, ChunkedToXContent, RefCounted, Iterable { public static final SearchHit[] EMPTY = new SearchHit[0]; public static final SearchHits EMPTY_WITH_TOTAL_HITS = SearchHits.empty(new TotalHits(0, Relation.EQUAL_TO), 0); @@ -48,6 +51,8 @@ public final class SearchHits implements Writeable, ChunkedToXContent, Iterable< @Nullable private final Object[] collapseValues; + private final RefCounted refCounted; + public static SearchHits empty(@Nullable TotalHits totalHits, float maxScore) { return new SearchHits(EMPTY, totalHits, maxScore); } @@ -63,6 +68,35 @@ public SearchHits( @Nullable SortField[] sortFields, @Nullable String collapseField, @Nullable Object[] collapseValues + ) { + this( + hits, + totalHits, + maxScore, + sortFields, + collapseField, + collapseValues, + hits.length == 0 ? 
ALWAYS_REFERENCED : LeakTracker.wrap(new AbstractRefCounted() {
+                @Override
+                protected void closeInternal() {
+                    for (int i = 0; i < hits.length; i++) {
+                        assert hits[i] != null;
+                        hits[i].decRef();
+                        hits[i] = null;
+                    }
+                }
+            })
+        );
+    }
+
+    private SearchHits(
+        SearchHit[] hits,
+        @Nullable TotalHits totalHits,
+        float maxScore,
+        @Nullable SortField[] sortFields,
+        @Nullable String collapseField,
+        @Nullable Object[] collapseValues,
+        RefCounted refCounted
     ) {
         this.hits = hits;
         this.totalHits = totalHits;
@@ -70,32 +104,64 @@ public SearchHits(
         this.sortFields = sortFields;
         this.collapseField = collapseField;
         this.collapseValues = collapseValues;
+        this.refCounted = refCounted;
     }

-    public SearchHits(StreamInput in) throws IOException {
+    public static SearchHits unpooled(SearchHit[] hits, @Nullable TotalHits totalHits, float maxScore) {
+        return unpooled(hits, totalHits, maxScore, null, null, null);
+    }
+
+    public static SearchHits unpooled(
+        SearchHit[] hits,
+        @Nullable TotalHits totalHits,
+        float maxScore,
+        @Nullable SortField[] sortFields,
+        @Nullable String collapseField,
+        @Nullable Object[] collapseValues
+    ) {
+        assert assertUnpooled(hits);
+        return new SearchHits(hits, totalHits, maxScore, sortFields, collapseField, collapseValues, ALWAYS_REFERENCED);
+    }
+
+    private static boolean assertUnpooled(SearchHit[] searchHits) {
+        for (SearchHit searchHit : searchHits) {
+            assert searchHit.isPooled() == false : "hit was pooled [" + searchHit + "]";
+        }
+        return true;
+    }
+
+    public static SearchHits readFrom(StreamInput in, boolean pooled) throws IOException {
+        final TotalHits totalHits;
         if (in.readBoolean()) {
             totalHits = Lucene.readTotalHits(in);
         } else {
             // track_total_hits is false
             totalHits = null;
         }
-        maxScore = in.readFloat();
+        final float maxScore = in.readFloat();
         int size = in.readVInt();
+        final SearchHit[] hits;
         if (size == 0) {
             hits = EMPTY;
         } else {
             hits = new SearchHit[size];
             for (int i = 0; i < hits.length; i++) {
-                hits[i] = SearchHit.readFrom(in);
+                hits[i] = SearchHit.readFrom(in, pooled);
             }
         }
-        sortFields = in.readOptionalArray(Lucene::readSortField, SortField[]::new);
-        collapseField = in.readOptionalString();
-        collapseValues = in.readOptionalArray(Lucene::readSortValue, Object[]::new);
+        var sortFields = in.readOptionalArray(Lucene::readSortField, SortField[]::new);
+        var collapseField = in.readOptionalString();
+        var collapseValues = in.readOptionalArray(Lucene::readSortValue, Object[]::new);
+        if (pooled) {
+            return new SearchHits(hits, totalHits, maxScore, sortFields, collapseField, collapseValues);
+        } else {
+            return unpooled(hits, totalHits, maxScore, sortFields, collapseField, collapseValues);
+        }
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
+        assert hasReferences();
         final boolean hasTotalHits = totalHits != null;
         out.writeBoolean(hasTotalHits);
         if (hasTotalHits) {
@@ -128,6 +194,7 @@ public float getMaxScore() {
      * The hits of the search request (based on the search type, and from / size provided).
      */
     public SearchHit[] getHits() {
+        assert hasReferences();
         return this.hits;
     }

@@ -135,6 +202,7 @@ public SearchHit[] getHits() {
      * Return the hit at the provided position.
*/ public SearchHit getAt(int position) { + assert hasReferences(); return hits[position]; } @@ -165,9 +233,42 @@ public Object[] getCollapseValues() { @Override public Iterator iterator() { + assert hasReferences(); return Iterators.forArray(getHits()); } + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + + public SearchHits asUnpooled() { + assert hasReferences(); + if (refCounted == ALWAYS_REFERENCED) { + return this; + } + final SearchHit[] unpooledHits = new SearchHit[hits.length]; + for (int i = 0; i < hits.length; i++) { + unpooledHits[i] = hits[i].asUnpooled(); + } + return unpooled(unpooledHits, totalHits, maxScore, sortFields, collapseField, collapseValues); + } + public static final class Fields { public static final String HITS = "hits"; public static final String TOTAL = "total"; @@ -176,6 +277,7 @@ public static final class Fields { @Override public Iterator toXContentChunked(ToXContent.Params params) { + assert hasReferences(); return Iterators.concat(Iterators.single((b, p) -> b.startObject(Fields.HITS)), Iterators.single((b, p) -> { boolean totalHitAsInt = params.paramAsBoolean(RestSearchAction.TOTAL_HITS_AS_INT_PARAM, false); if (totalHitAsInt) { @@ -239,7 +341,7 @@ public static SearchHits fromXContent(XContentParser parser) throws IOException } } } - return new SearchHits(hits.toArray(SearchHits.EMPTY), totalHits, maxScore); + return SearchHits.unpooled(hits.toArray(SearchHits.EMPTY), totalHits, maxScore); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 8ba48563c8f55..5b17203ded132 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -245,6 +245,7 @@ import org.elasticsearch.search.suggest.phrase.StupidBackoff; import org.elasticsearch.search.suggest.term.TermSuggestion; import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; +import org.elasticsearch.search.vectors.ExactKnnQueryBuilder; import org.elasticsearch.search.vectors.KnnScoreDocQueryBuilder; import org.elasticsearch.search.vectors.KnnVectorQueryBuilder; import org.elasticsearch.search.vectors.QueryVectorBuilder; @@ -1130,6 +1131,9 @@ private void registerQueryParsers(List plugins) { registerQuery(new QuerySpec<>(KnnScoreDocQueryBuilder.NAME, KnnScoreDocQueryBuilder::new, parser -> { throw new IllegalArgumentException("[score_doc] queries cannot be provided directly"); })); + registerQuery(new QuerySpec<>(ExactKnnQueryBuilder.NAME, ExactKnnQueryBuilder::new, parser -> { + throw new IllegalArgumentException("[exact_knn] queries cannot be provided directly"); + })); registerFromPlugin(plugins, SearchPlugin::getQueries, this::registerQuery); diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 8a03c7e9f08ba..d5b2565187a3f 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -53,6 +53,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.CoordinatorRewriteContextProvider; import 
org.elasticsearch.index.query.InnerHitContextBuilder; +import org.elasticsearch.index.query.InnerHitsRewriteContext; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -1234,13 +1235,19 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc context.size(source.size()); Map innerHitBuilders = new HashMap<>(); QueryBuilder query = source.query(); + InnerHitsRewriteContext innerHitsRewriteContext = new InnerHitsRewriteContext( + context.getSearchExecutionContext().getParserConfig(), + context::getRelativeTimeInMillis + ); if (query != null) { - InnerHitContextBuilder.extractInnerHits(query, innerHitBuilders); + QueryBuilder rewrittenForInnerHits = Rewriteable.rewrite(query, innerHitsRewriteContext, true); + InnerHitContextBuilder.extractInnerHits(rewrittenForInnerHits, innerHitBuilders); searchExecutionContext.setAliasFilter(context.request().getAliasFilter().getQueryBuilder()); context.parsedQuery(searchExecutionContext.toQuery(query)); } if (source.postFilter() != null) { - InnerHitContextBuilder.extractInnerHits(source.postFilter(), innerHitBuilders); + QueryBuilder rewrittenForInnerHits = Rewriteable.rewrite(source.postFilter(), innerHitsRewriteContext, true); + InnerHitContextBuilder.extractInnerHits(rewrittenForInnerHits, innerHitBuilders); context.parsedPostFilter(searchExecutionContext.toQuery(source.postFilter())); } if (innerHitBuilders.size() > 0) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 449326b1d69bb..41b40755dc6e1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; @@ -232,6 +233,14 @@ public int hashCode() { this.downsampledResultsOffset = downsampledResultsOffset; } + boolean versionSupportsDownsamplingTimezone(TransportVersion version) { + return version.onOrAfter(TransportVersions.DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ) + || version.between( + TransportVersions.DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ_8_12_PATCH, + TransportVersions.NODE_STATS_REQUEST_SIMPLIFIED + ); + } + /** * Stream from a stream. 
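parseSource now runs both the query and the post-filter through the InnerHitsRewriteContext before extracting inner-hit builders, so a query builder can present a cheaper shape purely for inner-hit extraction while the original query is still what gets compiled to Lucene. In outline (parserConfig and nowSupplier are illustrative names; the rewrite contract is the same Rewriteable.rewrite used above):

    // Outline of the extraction flow added in parseSource (variable names illustrative).
    InnerHitsRewriteContext ihrc = new InnerHitsRewriteContext(parserConfig, nowSupplier);
    QueryBuilder forInnerHits = Rewriteable.rewrite(query, ihrc, true);      // may simplify the query
    Map<String, InnerHitContextBuilder> innerHitBuilders = new HashMap<>();
    InnerHitContextBuilder.extractInnerHits(forInnerHits, innerHitBuilders); // walk the rewritten tree
    context.parsedQuery(searchExecutionContext.toQuery(query));              // original query still executes

Note the asymmetry: the rewritten builder feeds only extractInnerHits, while toQuery still receives the untouched query.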
*/ @@ -247,7 +256,7 @@ public InternalDateHistogram(StreamInput in) throws IOException { offset = in.readLong(); format = in.readNamedWriteable(DocValueFormat.class); keyed = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersions.DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ)) { + if (versionSupportsDownsamplingTimezone(in.getTransportVersion())) { downsampledResultsOffset = in.readBoolean(); } else { downsampledResultsOffset = false; @@ -265,7 +274,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeLong(offset); out.writeNamedWriteable(format); out.writeBoolean(keyed); - if (out.getTransportVersion().onOrAfter(TransportVersions.DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ)) { + if (versionSupportsDownsamplingTimezone(out.getTransportVersion())) { out.writeBoolean(downsampledResultsOffset); } out.writeCollection(buckets); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java index 77cb482edd8b4..fd637e14581ca 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java @@ -52,7 +52,7 @@ public InternalTopHits( this.from = from; this.size = size; this.topDocs = topDocs; - this.searchHits = searchHits; + this.searchHits = searchHits.asUnpooled(); } /** @@ -63,7 +63,7 @@ public InternalTopHits(StreamInput in) throws IOException { from = in.readVInt(); size = in.readVInt(); topDocs = Lucene.readTopDocs(in); - searchHits = new SearchHits(in); + searchHits = SearchHits.readFrom(in, false); } @Override @@ -152,8 +152,9 @@ private static SearchHits extractSearchHits( position = tracker[shardIndex]++; } while (topDocsForShard.scoreDocs[position] != scoreDoc); hits[i] = ((InternalTopHits) aggregations.get(shardIndex)).searchHits.getAt(position); + assert hits[i].isPooled() == false; } - return new SearchHits(hits, reducedTopDocs.totalHits, maxScore); + return SearchHits.unpooled(hits, reducedTopDocs.totalHits, maxScore); } private static float reduceAndFindMaxScore(List aggregations, TopDocs[] shardDocs) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java index 8f5d3c1b9f322..c3816bef6f0aa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java @@ -64,7 +64,7 @@ public MedianAbsoluteDeviationAggregationBuilder(String name) { public MedianAbsoluteDeviationAggregationBuilder(StreamInput in) throws IOException { super(in); compression = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { executionHint = in.readOptionalWriteable(TDigestExecutionHint::readFrom); } else { executionHint = TDigestExecutionHint.HIGH_ACCURACY; @@ -120,7 +120,7 @@ protected ValuesSourceType defaultValueSourceType() { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeDouble(compression); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if 
(out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(executionHint); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java index d946ce3e14ea1..fedae36be0263 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java @@ -130,7 +130,7 @@ public TDigest(double compression, TDigestExecutionHint executionHint) { TDigest(StreamInput in) throws IOException { this( in.readDouble(), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(TDigestExecutionHint::readFrom) : TDigestExecutionHint.HIGH_ACCURACY ); @@ -235,7 +235,7 @@ public InternalNumericMetricsAggregation.MultiValue createEmptyPercentileRanksAg public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeDouble(compression); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(executionHint); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java index 23c26794f6bb5..0d0ed21556a92 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java @@ -107,7 +107,7 @@ public final double compression() { public static void write(TDigestState state, StreamOutput out) throws IOException { out.writeDouble(state.compression); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeString(state.type.toString()); out.writeVLong(state.tdigest.size()); } @@ -123,7 +123,7 @@ public static TDigestState read(StreamInput in) throws IOException { double compression = in.readDouble(); TDigestState state; long size = 0; - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { state = new TDigestState(Type.valueOf(in.readString()), compression); size = in.readVLong(); } else { diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 0211e43933ec3..bc4b2a85bab68 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -219,7 +219,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { indexBoosts = in.readCollectionAsList(IndexBoost::new); minScore = in.readOptionalFloat(); postQueryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { subSearchSourceBuilders = in.readCollectionAsList(SubSearchSourceBuilder::new); } else { QueryBuilder queryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); @@ -289,7 +289,7 @@ public void 
writeTo(StreamOutput out) throws IOException {
         out.writeCollection(indexBoosts);
         out.writeOptionalFloat(minScore);
         out.writeOptionalNamedWriteable(postQueryBuilder);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeCollection(subSearchSourceBuilders);
         } else if (out.getTransportVersion().before(TransportVersions.V_8_4_0) && subSearchSourceBuilders.size() >= 2) {
             throw new IllegalArgumentException("cannot serialize [sub_searches] to version [" + out.getTransportVersion() + "]");
diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java
index 5d3288408c99b..dab127e8b4e56 100644
--- a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java
@@ -42,6 +42,8 @@
 import java.util.List;
 import java.util.Map;

+import static org.elasticsearch.index.query.AbstractQueryBuilder.DEFAULT_BOOST;
+
 /**
  * DFS phase of a search request, used to make scoring 100% accurate by collecting additional info from each shard before the query phase.
  * The additional information is used to better compare the scores coming from all the shards, which depend on local factors (e.g. idf).
@@ -181,6 +183,8 @@ private static void executeKnnVectorQuery(SearchContext context) throws IOExcept
         SearchExecutionContext searchExecutionContext = context.getSearchExecutionContext();
         List<KnnSearchBuilder> knnSearch = context.request().source().knnSearch();
         List<KnnVectorQueryBuilder> knnVectorQueryBuilders = knnSearch.stream().map(KnnSearchBuilder::toQueryBuilder).toList();
+        // Since we apply boost during the DfsQueryPhase, we should not apply boost here:
+        knnVectorQueryBuilders.forEach(knnVectorQueryBuilder -> knnVectorQueryBuilder.boost(DEFAULT_BOOST));

         if (context.request().getAliasFilter().getQueryBuilder() != null) {
             for (KnnVectorQueryBuilder knnVectorQueryBuilder : knnVectorQueryBuilders) {
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
index 91e4fb791f62d..c106d9b6f4cb2 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
@@ -82,6 +82,7 @@ public void execute(SearchContext context, int[] docIdsToLoad) {
             // Only set the shardResults if building search hits was successful
             if (hits != null) {
                 context.fetchResult().shardResult(hits, profileResult);
+                hits.decRef();
             }
         }
     }
@@ -173,7 +174,7 @@ protected SearchHit nextDoc(int doc) throws IOException {
         }

         TotalHits totalHits = context.getTotalHits();
-        return new SearchHits(hits, totalHits, context.getMaxScore());
+        return SearchHits.unpooled(hits, totalHits, context.getMaxScore());
     }

     List<FetchSubPhaseProcessor> getProcessors(SearchShardTarget target, FetchContext context, Profiler profiler) {
@@ -247,11 +248,12 @@ private static HitContext prepareNonNestedHitContext(
         String id = idLoader.getId(subDocId);
         if (id == null) {
-            SearchHit hit = new SearchHit(docId, null);
+            // TODO: can we use pooled buffers here as well?
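Resetting the kNN query boost to DEFAULT_BOOST in the DFS phase avoids boosting twice: the boost is applied once in the DfsQueryPhase, so if it also survived into the shard-level kNN query the final score would be scaled by boost squared rather than boost. A toy check with made-up numbers:

    // Toy illustration of the double-boost bug this change prevents (numbers are made up).
    float boost = 2.0f;
    float rawSimilarity = 0.5f;
    float boostedOnce = boost * rawSimilarity;             // 1.0 -- the intended score
    float boostedTwice = boost * (boost * rawSimilarity);  // 2.0 -- what double application would give
    assert boostedOnce != boostedTwice;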
+ SearchHit hit = SearchHit.unpooled(docId, null); Source source = Source.lazy(lazyStoredSourceLoader(profiler, subReaderContext, subDocId)); return new HitContext(hit, subReaderContext, subDocId, Map.of(), source); } else { - SearchHit hit = new SearchHit(docId, id); + SearchHit hit = SearchHit.unpooled(docId, id); Source source; if (requiresSource) { Timer timer = profiler.startLoadingSource(); @@ -328,7 +330,7 @@ private static HitContext prepareNestedHitContext( assert nestedIdentity != null; Source nestedSource = nestedIdentity.extractSource(rootSource); - SearchHit hit = new SearchHit(topDocId, rootId, nestedIdentity); + SearchHit hit = SearchHit.unpooled(topDocId, rootId, nestedIdentity); return new HitContext(hit, subReaderContext, nestedInfo.doc(), childFieldLoader.storedFields(), nestedSource); } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java index ea5ab13c2e8ee..cc39113f2009f 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java @@ -70,6 +70,11 @@ public final SearchHit[] iterate(SearchShardTarget shardTarget, IndexReader inde searchHits[docs[i].index] = nextDoc(docs[i].docId); } } catch (Exception e) { + for (SearchHit searchHit : searchHits) { + if (searchHit != null) { + searchHit.decRef(); + } + } throw new FetchPhaseExecutionException(shardTarget, "Error running fetch phase for doc [" + currentDoc + "]", e); } return searchHits; diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java index aa5c1f2cbd992..6cf924a239208 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java @@ -30,7 +30,12 @@ public final class FetchSearchResult extends SearchPhaseResult { private ProfileResult profileResult; - private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> hits = null)); + private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> { + if (hits != null) { + hits.decRef(); + hits = null; + } + })); public FetchSearchResult() {} @@ -42,12 +47,13 @@ public FetchSearchResult(ShardSearchContextId id, SearchShardTarget shardTarget) public FetchSearchResult(StreamInput in) throws IOException { super(in); contextId = new ShardSearchContextId(in); - hits = new SearchHits(in); + hits = SearchHits.readFrom(in, true); profileResult = in.readOptionalWriteable(ProfileResult::new); } @Override public void writeTo(StreamOutput out) throws IOException { + assert hasReferences(); contextId.writeTo(out); hits.writeTo(out); out.writeOptionalWriteable(profileResult); @@ -61,6 +67,7 @@ public FetchSearchResult fetchResult() { public void shardResult(SearchHits hits, ProfileResult profileResult) { assert assertNoSearchTarget(hits); this.hits = hits; + hits.incRef(); assert this.profileResult == null; this.profileResult = profileResult; } @@ -73,6 +80,7 @@ private static boolean assertNoSearchTarget(SearchHits hits) { } public SearchHits hits() { + assert hasReferences(); return hits; } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java index 
feb0547a32536..ccb54801472a6 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java @@ -103,7 +103,9 @@ private void hitExecute(Map innerHi searchHitFields.sortValues(fieldDoc.fields, innerHitsContext.sort().formats); } } - results.put(entry.getKey(), fetchResult.hits()); + var h = fetchResult.hits(); + results.put(entry.getKey(), h); + h.mustIncRef(); } } } diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 18ae708d8fec3..2023ee2e8d4b6 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -284,8 +284,7 @@ public ShardSearchRequest(StreamInput in) throws IOException { numberOfShards = in.readVInt(); scroll = in.readOptionalWriteable(Scroll::new); source = in.readOptionalWriteable(SearchSourceBuilder::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) - && in.getTransportVersion().before(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) && in.getTransportVersion().before(TransportVersions.V_8_9_X)) { // to deserialize between the 8.8 and 8.500.020 version we need to translate // the rank queries into sub searches if we are ranking; if there are no rank queries // we deserialize the empty list and do nothing @@ -360,8 +359,7 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce } out.writeOptionalWriteable(scroll); out.writeOptionalWriteable(source); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) - && out.getTransportVersion().before(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) && out.getTransportVersion().before(TransportVersions.V_8_9_X)) { // to serialize between the 8.8 and 8.500.020 version we need to translate // the sub searches into rank queries if we are ranking, otherwise, we // ignore this because linear combination will have multiple sub searches in diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java index 76ee7e09ad870..4c42daba22b7a 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java @@ -73,6 +73,8 @@ public static RescorerBuilder parseFromXContent(XContentParser parser, Consum RescorerBuilder rescorer = null; Integer windowSize = null; XContentParser.Token token; + String rescorerType = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); @@ -83,8 +85,11 @@ public static RescorerBuilder parseFromXContent(XContentParser parser, Consum throw new ParsingException(parser.getTokenLocation(), "rescore doesn't support [" + fieldName + "]"); } } else if (token == XContentParser.Token.START_OBJECT) { - rescorer = parser.namedObject(RescorerBuilder.class, fieldName, null); - rescorerNameConsumer.accept(fieldName); + if (fieldName != null) { + rescorer = parser.namedObject(RescorerBuilder.class, fieldName, null); + rescorerNameConsumer.accept(fieldName); + rescorerType = fieldName; + } } else { 
throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "] after [" + fieldName + "]"); } @@ -92,9 +97,13 @@ public static RescorerBuilder parseFromXContent(XContentParser parser, Consum if (rescorer == null) { throw new ParsingException(parser.getTokenLocation(), "missing rescore type"); } + if (windowSize != null) { rescorer.windowSize(windowSize.intValue()); + } else if (rescorer.isWindowSizeRequired()) { + throw new ParsingException(parser.getTokenLocation(), "window_size is required for rescorer of type [" + rescorerType + "]"); } + return rescorer; } @@ -111,11 +120,21 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws protected abstract void doXContent(XContentBuilder builder, Params params) throws IOException; + /** + * Indicate if the window_size is a required parameter for the rescorer. + */ + protected boolean isWindowSizeRequired() { + return false; + } + /** * Build the {@linkplain RescoreContext} that will be used to actually * execute the rescore against a particular shard. */ public final RescoreContext buildContext(SearchExecutionContext context) throws IOException { + if (isWindowSizeRequired()) { + assert windowSize != null; + } int finalWindowSize = windowSize == null ? DEFAULT_WINDOW_SIZE : windowSize; RescoreContext rescoreContext = innerBuildContext(finalWindowSize, context); return rescoreContext; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java index 40ff9c6eaf6ee..7210c35d961ac 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java @@ -271,7 +271,9 @@ public Option(int docID, Text text, float score, Map> contex public Option(StreamInput in) throws IOException { super(in); this.doc = Lucene.readScoreDoc(in); - this.hit = in.readOptionalWriteable(SearchHit::readFrom); + if (in.readBoolean()) { + this.hit = SearchHit.readFrom(in, false); + } int contextSize = in.readInt(); this.contexts = Maps.newLinkedHashMapWithExpectedSize(contextSize); for (int i = 0; i < contextSize; i++) { @@ -309,7 +311,7 @@ public void setShardIndex(int shardIndex) { } public void setHit(SearchHit hit) { - this.hit = hit; + this.hit = hit == null ? null : hit.asUnpooled(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java new file mode 100644 index 0000000000000..d292f61dcb085 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.vectors; + +import org.apache.lucene.search.Query; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Exact knn query builder. Will iterate and score all documents that have the provided knn field in the index. + * Useful in inner hits scoring scenarios. + */ +public class ExactKnnQueryBuilder extends AbstractQueryBuilder { + public static final String NAME = "exact_knn"; + private final String field; + private final float[] query; + + /** + * Creates a query builder. + * + * @param query the query vector + * @param field the field that was used for the kNN query + */ + public ExactKnnQueryBuilder(float[] query, String field) { + this.query = query; + this.field = field; + } + + public ExactKnnQueryBuilder(StreamInput in) throws IOException { + super(in); + this.query = in.readFloatArray(); + this.field = in.readString(); + } + + String getField() { + return field; + } + + float[] getQuery() { + return query; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeFloatArray(query); + out.writeString(field); + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.field("query", query); + builder.field("field", field); + boostAndQueryNameToXContent(builder); + builder.endObject(); + } + + @Override + protected Query doToQuery(SearchExecutionContext context) throws IOException { + final MappedFieldType fieldType = context.getFieldType(field); + if (fieldType == null) { + throw new IllegalArgumentException("field [" + field + "] does not exist in the mapping"); + } + if (fieldType instanceof DenseVectorFieldMapper.DenseVectorFieldType == false) { + throw new IllegalArgumentException( + "[" + NAME + "] queries are only supported on [" + DenseVectorFieldMapper.CONTENT_TYPE + "] fields" + ); + } + final DenseVectorFieldMapper.DenseVectorFieldType vectorFieldType = (DenseVectorFieldMapper.DenseVectorFieldType) fieldType; + return vectorFieldType.createExactKnnQuery(query); + } + + @Override + protected boolean doEquals(ExactKnnQueryBuilder other) { + return field.equals(other.field) && Arrays.equals(query, other.query); + } + + @Override + protected int doHashCode() { + return Objects.hash(field, Arrays.hashCode(query)); + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + return this; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.NESTED_KNN_MORE_INNER_HITS; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java index 13ca1d3dc1db2..ea9b2df942808 100644 --- 
a/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java @@ -35,6 +35,8 @@ public class KnnScoreDocQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "knn_score_doc"; private final ScoreDoc[] scoreDocs; + private final String fieldName; + private final float[] queryVector; /** * Creates a query builder. @@ -42,13 +44,26 @@ public class KnnScoreDocQueryBuilder extends AbstractQueryBuilder rewrittenQueries = new ArrayList<>(filterQueries.size()); for (QueryBuilder query : filterQueries) { @@ -260,6 +263,7 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { DenseVectorFieldType vectorFieldType = (DenseVectorFieldType) fieldType; String parentPath = context.nestedLookup().getNestedParent(fieldName); + if (parentPath != null) { NestedObjectMapper originalObjectMapper = context.nestedScope().getObjectMapper(); if (originalObjectMapper != null) { diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java index 377c7b3847b0b..d1efb51e36856 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java @@ -67,7 +67,7 @@ public static TaskInfo from(StreamInput in) throws IOException { return new TaskInfo( taskId, in.readString(), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) ? in.readString() : taskId.getNodeId(), + in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X) ? in.readString() : taskId.getNodeId(), in.readString(), in.readOptionalString(), in.readOptionalNamedWriteable(Task.Status.class), @@ -84,7 +84,7 @@ public static TaskInfo from(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { taskId.writeTo(out); out.writeString(type); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeString(node); } out.writeString(action); diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 17cafaee19bb4..fef0d93ec86cc 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -29,6 +29,11 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.node.Node; import org.elasticsearch.node.ReportingService; +import org.elasticsearch.telemetry.metric.Instrument; +import org.elasticsearch.telemetry.metric.LongAsyncCounter; +import org.elasticsearch.telemetry.metric.LongGauge; +import org.elasticsearch.telemetry.metric.LongWithAttributes; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -85,6 +90,13 @@ public static class Names { public static final String SYSTEM_CRITICAL_WRITE = "system_critical_write"; } + public static final String THREAD_POOL_METRIC_PREFIX = "es.thread_pool."; + public static final String THREAD_POOL_METRIC_NAME_COMPLETED = ".threads.completed.total"; + public static final String THREAD_POOL_METRIC_NAME_CURRENT = ".threads.count.current"; + public static final String THREAD_POOL_METRIC_NAME_QUEUE = ".threads.queue.size"; + public static final String THREAD_POOL_METRIC_NAME_ACTIVE = 
".threads.active.current"; + public static final String THREAD_POOL_METRIC_NAME_LARGEST = ".threads.largest.current"; + public enum ThreadPoolType { DIRECT("direct"), FIXED("fixed"), @@ -153,6 +165,8 @@ public static ThreadPoolType fromType(String type) { private final long slowSchedulerWarnThresholdNanos; + private Map> instruments; + @SuppressWarnings("rawtypes") public Collection builders() { return Collections.unmodifiableCollection(builders.values()); @@ -180,7 +194,7 @@ public Collection builders() { ); @SuppressWarnings({ "rawtypes", "unchecked" }) - public ThreadPool(final Settings settings, final ExecutorBuilder... customBuilders) { + public ThreadPool(final Settings settings, MeterRegistry meterRegistry, final ExecutorBuilder... customBuilders) { assert Node.NODE_NAME_SETTING.exists(settings); final Map builders = new HashMap<>(); @@ -189,6 +203,7 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... customBui final int halfProcMaxAt5 = halfAllocatedProcessorsMaxFive(allocatedProcessors); final int halfProcMaxAt10 = halfAllocatedProcessorsMaxTen(allocatedProcessors); final int genericThreadPoolMax = boundedBy(4 * allocatedProcessors, 128, 512); + final Map> instruments = new HashMap<>(); builders.put( Names.GENERIC, @@ -307,7 +322,8 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... customBui executors.put(Names.SAME, new ExecutorHolder(EsExecutors.DIRECT_EXECUTOR_SERVICE, new Info(Names.SAME, ThreadPoolType.DIRECT))); this.executors = Map.copyOf(executors); - + this.executors.forEach((k, v) -> instruments.put(k, setupMetrics(meterRegistry, k, v))); + this.instruments = instruments; final List infos = executors.values() .stream() .filter(holder -> holder.info.getName().equals("same") == false) @@ -324,6 +340,59 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... 
customBui this.cachedTimeThread.start(); } + private static ArrayList setupMetrics(MeterRegistry meterRegistry, String name, ExecutorHolder holder) { + Map at = Map.of(); + ArrayList instruments = new ArrayList<>(); + if (holder.executor() instanceof ThreadPoolExecutor threadPoolExecutor) { + String prefix = THREAD_POOL_METRIC_PREFIX + name; + instruments.add( + meterRegistry.registerLongGauge( + prefix + THREAD_POOL_METRIC_NAME_CURRENT, + "number of threads for " + name, + "count", + () -> new LongWithAttributes(threadPoolExecutor.getPoolSize(), at) + ) + ); + instruments.add( + meterRegistry.registerLongGauge( + prefix + THREAD_POOL_METRIC_NAME_QUEUE, + "number queue size for " + name, + "count", + () -> new LongWithAttributes(threadPoolExecutor.getQueue().size(), at) + ) + ); + instruments.add( + meterRegistry.registerLongGauge( + prefix + THREAD_POOL_METRIC_NAME_ACTIVE, + "number of active threads for " + name, + "count", + () -> new LongWithAttributes(threadPoolExecutor.getActiveCount(), at) + ) + ); + instruments.add( + meterRegistry.registerLongGauge( + prefix + THREAD_POOL_METRIC_NAME_LARGEST, + "largest pool size for " + name, + "count", + () -> new LongWithAttributes(threadPoolExecutor.getLargestPoolSize(), at) + ) + ); + instruments.add( + meterRegistry.registerLongAsyncCounter( + prefix + THREAD_POOL_METRIC_NAME_COMPLETED, + "number of completed threads for " + name, + "count", + () -> new LongWithAttributes(threadPoolExecutor.getCompletedTaskCount(), at) + ) + ); + RejectedExecutionHandler rejectedExecutionHandler = threadPoolExecutor.getRejectedExecutionHandler(); + if (rejectedExecutionHandler instanceof EsRejectedExecutionHandler handler) { + handler.registerCounter(meterRegistry, prefix, name); + } + } + return instruments; + } + // for subclassing by tests that don't actually use any of the machinery that the regular constructor sets up protected ThreadPool() { this.builders = Map.of(); @@ -541,11 +610,33 @@ protected final void stopCachedTimeThread() { cachedTimeThread.interrupt(); } + private void closeMetrics(ExecutorHolder executor) { + if (this.instruments.containsKey(executor.info.getName())) { + this.instruments.get(executor.info.getName()).forEach((instrument) -> { + if (instrument instanceof LongAsyncCounter longasynccounter) { + try { + longasynccounter.close(); + } catch (Exception e) { + logger.warn(format("Failed to close LongAsyncCounter for %s. %s", executor.info.getName(), e.getMessage()), e); + } + } else if (instrument instanceof LongGauge longgauge) { + try { + longgauge.close(); + } catch (Exception e) { + logger.warn(format("Failed to close LongGauge for %s. 
%s", executor.info.getName(), e.getMessage()), e); + } + } + }); + } + this.instruments.remove(executor.info.getName()); + } + public void shutdown() { stopCachedTimeThread(); scheduler.shutdown(); for (ExecutorHolder executor : executors.values()) { if (executor.executor() instanceof ThreadPoolExecutor) { + closeMetrics(executor); executor.executor().shutdown(); } } @@ -556,6 +647,7 @@ public void shutdownNow() { scheduler.shutdownNow(); for (ExecutorHolder executor : executors.values()) { if (executor.executor() instanceof ThreadPoolExecutor) { + closeMetrics(executor); executor.executor().shutdownNow(); } } @@ -565,6 +657,7 @@ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedE boolean result = scheduler.awaitTermination(timeout, unit); for (ExecutorHolder executor : executors.values()) { if (executor.executor() instanceof ThreadPoolExecutor) { + closeMetrics(executor); result &= executor.executor().awaitTermination(timeout, unit); } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java index fd5c39ec5fb1f..9e68557c05de6 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java @@ -39,7 +39,7 @@ */ public class RemoteClusterPortSettings { - public static final TransportVersion TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY = TransportVersions.V_8_500_061; + public static final TransportVersion TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY = TransportVersions.V_8_10_X; public static final String REMOTE_CLUSTER_PROFILE = "_remote_cluster"; public static final String REMOTE_CLUSTER_PREFIX = "remote_cluster."; diff --git a/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java b/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java index 75ab5db982235..04a0b3434814a 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java +++ b/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java @@ -18,22 +18,17 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.TreeMap; -import java.util.stream.Collectors; /** * Holds the results of the most recent attempt to migrate system indices. 
Updated by {@link SystemIndexMigrator} as it finishes each @@ -43,25 +38,7 @@ public class FeatureMigrationResults implements Metadata.Custom { public static final String TYPE = "system_index_migration"; public static final TransportVersion MIGRATION_ADDED_VERSION = TransportVersions.V_8_0_0; - private static final ParseField RESULTS_FIELD = new ParseField("results"); - - @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(TYPE, a -> { - final Map statuses = ((List>) a[0]).stream() - .collect(Collectors.toMap(Tuple::v1, Tuple::v2)); - return new FeatureMigrationResults(statuses); - }); - - static { - PARSER.declareNamedObjects( - ConstructingObjectParser.constructorArg(), - (p, c, n) -> new Tuple<>(n, SingleFeatureMigrationResult.fromXContent(p)), - v -> { - throw new IllegalArgumentException("ordered " + RESULTS_FIELD.getPreferredName() + " are not supported"); - }, - RESULTS_FIELD - ); - } + static final ParseField RESULTS_FIELD = new ParseField("results"); private final Map featureStatuses; @@ -83,10 +60,6 @@ public Iterator toXContentChunked(ToXContent.Params ignore return ChunkedToXContentHelper.xContentValuesMap(RESULTS_FIELD.getPreferredName(), featureStatuses); } - public static FeatureMigrationResults fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - /** * Gets a map of feature name to that feature's status. Only contains features which have either been migrated successfully or * failed to migrate. diff --git a/server/src/main/java/org/elasticsearch/upgrades/SingleFeatureMigrationResult.java b/server/src/main/java/org/elasticsearch/upgrades/SingleFeatureMigrationResult.java index db1c325dfbb7f..24ed1943ed04e 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/SingleFeatureMigrationResult.java +++ b/server/src/main/java/org/elasticsearch/upgrades/SingleFeatureMigrationResult.java @@ -14,11 +14,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -27,10 +25,9 @@ * Holds the results of migrating a single feature. See also {@link FeatureMigrationResults}. 
*/ public class SingleFeatureMigrationResult implements SimpleDiffable, Writeable, ToXContentObject { - private static final String NAME = "feature_migration_status"; - private static final ParseField SUCCESS_FIELD = new ParseField("successful"); - private static final ParseField FAILED_INDEX_NAME_FIELD = new ParseField("failed_index"); - private static final ParseField EXCEPTION_FIELD = new ParseField("exception"); + static final ParseField SUCCESS_FIELD = new ParseField("successful"); + static final ParseField FAILED_INDEX_NAME_FIELD = new ParseField("failed_index"); + static final ParseField EXCEPTION_FIELD = new ParseField("exception"); private final boolean successful; @Nullable @@ -38,23 +35,7 @@ public class SingleFeatureMigrationResult implements SimpleDiffable PARSER = new ConstructingObjectParser<>( - NAME, - a -> new SingleFeatureMigrationResult((boolean) a[0], (String) a[1], (Exception) a[2]) - ); - - static { - PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SUCCESS_FIELD); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FAILED_INDEX_NAME_FIELD); - PARSER.declareObject( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> ElasticsearchException.fromXContent(p), - EXCEPTION_FIELD - ); - } - - private SingleFeatureMigrationResult(boolean successful, String failedIndexName, Exception exception) { + SingleFeatureMigrationResult(boolean successful, String failedIndexName, Exception exception) { this.successful = successful; if (successful == false) { Objects.requireNonNull(failedIndexName, "failed index name must be present for failed feature migration statuses"); @@ -75,10 +56,6 @@ private SingleFeatureMigrationResult(boolean successful, String failedIndexName, } } - public static SingleFeatureMigrationResult fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - /** * Creates a record indicating that migration succeeded. 
*/ diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json index d8b4ed1ff93c9..46e32300e70fd 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json @@ -29,5 +29,6 @@ "BOOTSTRAP_CHECK_ROLE_MAPPINGS": "bootstrap-checks-xpack.html#_role_mappings_check", "BOOTSTRAP_CHECK_TLS": "bootstrap-checks-xpack.html#bootstrap-checks-tls", "BOOTSTRAP_CHECK_TOKEN_SSL": "bootstrap-checks-xpack.html#_token_ssl_check", - "BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP": "security-minimal-setup.html" + "BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP": "security-minimal-setup.html", + "CONTACT_SUPPORT": "troubleshooting.html#troubleshooting-contact-support" } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoActionTests.java index 38c811d367560..9c7fa266a0762 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoActionTests.java @@ -69,6 +69,7 @@ private ClusterFormationInfoAction.Response mutateResponse(ClusterFormationInfoA clusterFormationState.lastCommittedConfiguration(), clusterFormationState.resolvedAddresses(), clusterFormationState.foundPeers(), + clusterFormationState.mastersOfPeers(), clusterFormationState.currentTerm(), clusterFormationState.hasDiscoveredQuorum(), clusterFormationState.statusInfo(), @@ -88,6 +89,7 @@ private ClusterFormationInfoAction.Response mutateResponse(ClusterFormationInfoA clusterFormationState.lastCommittedConfiguration(), clusterFormationState.resolvedAddresses(), clusterFormationState.foundPeers(), + clusterFormationState.mastersOfPeers(), clusterFormationState.currentTerm(), clusterFormationState.hasDiscoveredQuorum(), clusterFormationState.statusInfo(), @@ -107,6 +109,7 @@ private ClusterFormationInfoAction.Response mutateResponse(ClusterFormationInfoA clusterFormationState.lastCommittedConfiguration(), clusterFormationState.resolvedAddresses(), clusterFormationState.foundPeers(), + clusterFormationState.mastersOfPeers(), clusterFormationState.currentTerm(), clusterFormationState.hasDiscoveredQuorum() == false, clusterFormationState.statusInfo(), @@ -148,6 +151,7 @@ private ClusterFormationFailureHelper.ClusterFormationState getClusterFormationS new CoordinationMetadata.VotingConfiguration(Collections.emptySet()), Collections.emptyList(), Collections.emptyList(), + Collections.emptySet(), randomLong(), randomBoolean(), new StatusInfo(randomFrom(StatusInfo.Status.HEALTHY, StatusInfo.Status.UNHEALTHY), randomAlphaOfLength(20)), diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index 345f85470a056..adefd71f93590 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; 
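The TransportVersions renames threaded through this diff (V_8_500_020 becomes V_8_9_X, V_8_500_061 becomes V_8_10_X, as in TaskInfo and RemoteClusterPortSettings above) all guard the same wire pattern: an optional field is written and read only when the negotiated transport version is new enough, and older peers fall back to a fixed default. A self-contained sketch of that gate, assuming plain java.io streams and an illustrative integer version id in place of the real TransportVersion type:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WireCompat {
    static final int V_8_9_X = 8_500_020; // hypothetical numeric id standing in for TransportVersions.V_8_9_X

    // Mirrors writeOptionalWriteable behind a version gate: older peers get
    // nothing on the wire and must supply their own default on read.
    static void writeHint(DataOutputStream out, int peerVersion, String hint) throws IOException {
        if (peerVersion >= V_8_9_X) {
            out.writeBoolean(hint != null);
            if (hint != null) {
                out.writeUTF(hint);
            }
        }
    }

    static String readHint(DataInputStream in, int peerVersion) throws IOException {
        if (peerVersion >= V_8_9_X) {
            return in.readBoolean() ? in.readUTF() : null;
        }
        return "high_accuracy"; // stand-in for the TDigestExecutionHint.HIGH_ACCURACY fallback
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeHint(new DataOutputStream(bytes), V_8_9_X, "default");
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readHint(in, V_8_9_X)); // prints: default
    }
}

Because read and write must agree on the gate, the rename from the temporary 8.500.xxx ids has to land consistently on every serialization path at once, which is why it touches so many files in this diff.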
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.ActionTestUtils; @@ -289,7 +288,7 @@ public void onFailure(Exception e) { request.setReason("Testing Cancellation"); request.setTargetTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); // And send the cancellation request to a random node - CancelTasksResponse response = ActionTestUtils.executeBlocking( + ListTasksResponse response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction, request ); @@ -368,7 +367,7 @@ public void onFailure(Exception e) { request.setReason("Testing Cancellation"); request.setTargetParentTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); // And send the cancellation request to a random node - CancelTasksResponse response = ActionTestUtils.executeBlocking( + ListTasksResponse response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(1, testNodes.length - 1)].transportCancelTasksAction, request ); @@ -487,7 +486,7 @@ public void onFailure(Exception e) { request.setReason("Testing Cancellation"); request.setTargetTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); // And send the cancellation request to a random node - CancelTasksResponse response = ActionTestUtils.executeBlocking(testNodes[0].transportCancelTasksAction, request); + ListTasksResponse response = ActionTestUtils.executeBlocking(testNodes[0].transportCancelTasksAction, request); logger.info("--> Done simulating issuing cancel request on the node that is about to leave the cluster"); // This node still thinks that's part of the cluster, so cancelling should look successful assertThat(response.getTasks().size(), lessThanOrEqualTo(1)); @@ -544,7 +543,7 @@ public void testNonExistingTaskCancellation() throws Exception { randomSubsetOf(randomIntBetween(1, testNodes.length - 1), testNodes).stream().map(TestNode::getNodeId).toArray(String[]::new) ); // And send the cancellation request to a random node - CancelTasksResponse response = ActionTestUtils.executeBlocking( + ListTasksResponse response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(1, testNodes.length - 1)].transportCancelTasksAction, request ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 86ccd9807cf9f..7168b2c1edcdd 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; @@ -521,7 +520,7 @@ public 
void testCancellingTasksThatDontSupportCancellation() throws Exception { request.setNodes(testNodes[0].getNodeId()); request.setReason("Testing Cancellation"); request.setActions(actionName); - CancelTasksResponse response = ActionTestUtils.executeBlocking( + ListTasksResponse response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction, request ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java index ff70e7e6756ed..f8d3871fbfa8f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java @@ -38,7 +38,7 @@ protected GetScriptLanguageResponse createTestInstance() { @Override protected GetScriptLanguageResponse doParseInstance(XContentParser parser) throws IOException { - return GetScriptLanguageResponse.fromXContent(parser); + return new GetScriptLanguageResponse(ScriptLanguagesInfo.fromXContent(parser)); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java index 500080fa9f118..05820c071052c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java @@ -12,6 +12,8 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.StoredScriptSource; import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; @@ -20,11 +22,41 @@ import java.util.Map; import java.util.function.Predicate; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + public class GetStoredScriptResponseTests extends AbstractXContentSerializingTestCase { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "GetStoredScriptResponse", + true, + (a, c) -> { + String id = (String) a[0]; + boolean found = (Boolean) a[1]; + StoredScriptSource scriptSource = (StoredScriptSource) a[2]; + return found ? 
new GetStoredScriptResponse(id, scriptSource) : new GetStoredScriptResponse(id, null); + } + ); + + static { + PARSER.declareField(constructorArg(), (p, c) -> p.text(), GetStoredScriptResponse._ID_PARSE_FIELD, ObjectParser.ValueType.STRING); + PARSER.declareField( + constructorArg(), + (p, c) -> p.booleanValue(), + GetStoredScriptResponse.FOUND_PARSE_FIELD, + ObjectParser.ValueType.BOOLEAN + ); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> StoredScriptSource.fromXContent(p, true), + GetStoredScriptResponse.SCRIPT, + ObjectParser.ValueType.OBJECT + ); + } + @Override protected GetStoredScriptResponse doParseInstance(XContentParser parser) throws IOException { - return GetStoredScriptResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponseTests.java deleted file mode 100644 index 4f3b14cd986c1..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponseTests.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.cache.clear; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.test.AbstractBroadcastResponseTestCase; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.List; - -public class ClearIndicesCacheResponseTests extends AbstractBroadcastResponseTestCase { - - @Override - protected ClearIndicesCacheResponse createTestInstance( - int totalShards, - int successfulShards, - int failedShards, - List failures - ) { - return new ClearIndicesCacheResponse(totalShards, successfulShards, failedShards, failures); - } - - @Override - protected ClearIndicesCacheResponse doParseInstance(XContentParser parser) { - return ClearIndicesCacheResponse.fromXContent(parser); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushResponseTests.java deleted file mode 100644 index 62611060ce25d..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushResponseTests.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.test.AbstractBroadcastResponseTestCase; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.List; - -public class FlushResponseTests extends AbstractBroadcastResponseTestCase { - - @Override - protected FlushResponse createTestInstance( - int totalShards, - int successfulShards, - int failedShards, - List failures - ) { - return new FlushResponse(totalShards, successfulShards, failedShards, failures); - } - - @Override - protected FlushResponse doParseInstance(XContentParser parser) { - return FlushResponse.fromXContent(parser); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java deleted file mode 100644 index ed1160edeb8f5..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.forcemerge; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.test.AbstractBroadcastResponseTestCase; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.List; - -public class ForceMergeResponseTests extends AbstractBroadcastResponseTestCase { - @Override - protected ForceMergeResponse createTestInstance( - int totalShards, - int successfulShards, - int failedShards, - List failures - ) { - return new ForceMergeResponse(totalShards, successfulShards, failedShards, failures); - } - - @Override - protected ForceMergeResponse doParseInstance(XContentParser parser) { - return ForceMergeResponse.fromXContent(parser); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java deleted file mode 100644 index 5a3183b3e61b9..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.refresh; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.test.AbstractBroadcastResponseTestCase; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.List; - -public class RefreshResponseTests extends AbstractBroadcastResponseTestCase { - - @Override - protected RefreshResponse createTestInstance( - int totalShards, - int successfulShards, - int failedShards, - List failures - ) { - return new RefreshResponse(totalShards, successfulShards, failedShards, failures); - } - - @Override - protected RefreshResponse doParseInstance(XContentParser parser) { - return RefreshResponse.fromXContent(parser); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponseTests.java deleted file mode 100644 index b9f3e8b89a214..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponseTests.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.shrink; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.AbstractXContentSerializingTestCase; -import org.elasticsearch.xcontent.XContentParser; - -public class ResizeResponseTests extends AbstractXContentSerializingTestCase { - - public void testToXContent() { - ResizeResponse response = new ResizeResponse(true, false, "index_name"); - String output = Strings.toString(response); - assertEquals(""" - {"acknowledged":true,"shards_acknowledged":false,"index":"index_name"}""", output); - } - - @Override - protected ResizeResponse doParseInstance(XContentParser parser) { - return ResizeResponse.fromXContent(parser); - } - - @Override - protected ResizeResponse createTestInstance() { - boolean acknowledged = randomBoolean(); - boolean shardsAcknowledged = acknowledged && randomBoolean(); - String index = randomAlphaOfLength(5); - return new ResizeResponse(acknowledged, shardsAcknowledged, index); - } - - @Override - protected Writeable.Reader instanceReader() { - return ResizeResponse::new; - } - - @Override - protected ResizeResponse mutateInstance(ResizeResponse response) { - if (randomBoolean()) { - if (randomBoolean()) { - boolean acknowledged = response.isAcknowledged() == false; - boolean shardsAcknowledged = acknowledged && response.isShardsAcknowledged(); - return new ResizeResponse(acknowledged, shardsAcknowledged, response.index()); - } else { - boolean shardsAcknowledged = response.isShardsAcknowledged() == false; - boolean acknowledged = shardsAcknowledged || response.isAcknowledged(); - return new ResizeResponse(acknowledged, shardsAcknowledged, response.index()); - } - } else { - return new ResizeResponse( - response.isAcknowledged(), - response.isShardsAcknowledged(), - response.index() + randomAlphaOfLengthBetween(2, 5) - ); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java 
b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index 1276f6c2db58b..75833052dd4c8 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -143,8 +143,7 @@ void createIndex(String index, TimeValue timeout, ActionListener items = new ArrayList<>(); + for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { + switch (token) { + case FIELD_NAME: + currentFieldName = parser.currentName(); + break; + case START_ARRAY: + if (MultiGetResponse.DOCS.getPreferredName().equals(currentFieldName)) { + for (token = parser.nextToken(); token != XContentParser.Token.END_ARRAY; token = parser.nextToken()) { + if (token == XContentParser.Token.START_OBJECT) { + items.add(parseItem(parser)); + } + } + } + break; + default: + // If unknown tokens are encountered then these should be ignored, because + // this is parsing logic on the client side. + break; + } + } + return new MultiGetResponse(items.toArray(new MultiGetItemResponse[0])); + } + + private static MultiGetItemResponse parseItem(XContentParser parser) throws IOException { + String currentFieldName = null; + String index = null; + String id = null; + ElasticsearchException exception = null; + GetResult getResult = null; + for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { + switch (token) { + case FIELD_NAME: + currentFieldName = parser.currentName(); + if (MultiGetResponse.INDEX.match(currentFieldName, parser.getDeprecationHandler()) == false + && MultiGetResponse.ID.match(currentFieldName, parser.getDeprecationHandler()) == false + && ERROR.match(currentFieldName, parser.getDeprecationHandler()) == false) { + getResult = GetResult.fromXContentEmbedded(parser, index, id); + } + break; + case VALUE_STRING: + if (MultiGetResponse.INDEX.match(currentFieldName, parser.getDeprecationHandler())) { + index = parser.text(); + } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + deprecationLogger.compatibleCritical("mget_with_types", RestMultiGetAction.TYPES_DEPRECATION_MESSAGE); + } else if (MultiGetResponse.ID.match(currentFieldName, parser.getDeprecationHandler())) { + id = parser.text(); + } + break; + case START_OBJECT: + if (ERROR.match(currentFieldName, parser.getDeprecationHandler())) { + exception = ElasticsearchException.fromXContent(parser); + } + break; + default: + // If unknown tokens are encountered then these should be ignored, because + // this is parsing logic on the client side.
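The MultiGetResponse parsing logic inlined into the test above is a classic pull parser: walk tokens, remember the current field name, descend into the docs array, and ignore anything unknown. The same token-loop shape, sketched stand-alone with Jackson's JsonParser standing in for Elasticsearch's XContentParser; the "docs"/"_id" field names and the flat element shape are assumptions of the sketch:

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Token-loop parser in the same shape as the test's fromXContent: remember the
// current field name, descend into "docs", ignore unknown tokens. Assumes flat
// {"_id": "..."} array elements.
public class DocsParserSketch {
    public static List<String> parseIds(String json) throws IOException {
        List<String> ids = new ArrayList<>();
        try (JsonParser parser = new JsonFactory().createParser(json)) {
            String currentFieldName = null;
            parser.nextToken(); // consume the outer START_OBJECT
            for (JsonToken token = parser.nextToken(); token != JsonToken.END_OBJECT; token = parser.nextToken()) {
                if (token == JsonToken.FIELD_NAME) {
                    currentFieldName = parser.getCurrentName();
                } else if (token == JsonToken.START_ARRAY && "docs".equals(currentFieldName)) {
                    while (parser.nextToken() != JsonToken.END_ARRAY) { // one START_OBJECT per item
                        while (parser.nextToken() != JsonToken.END_OBJECT) {
                            if ("_id".equals(parser.getCurrentName()) && parser.nextToken() == JsonToken.VALUE_STRING) {
                                ids.add(parser.getText());
                            }
                        }
                    }
                } else {
                    parser.skipChildren(); // unknown tokens are ignored, as in the original
                }
            }
        }
        return ids;
    }

    public static void main(String[] args) throws IOException {
        System.out.println(parseIds("{\"docs\":[{\"_id\":\"1\"},{\"_id\":\"2\"}],\"took\":3}")); // [1, 2]
    }
}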
+ break; + } + if (getResult != null) { + break; + } + } + + if (exception != null) { + return new MultiGetItemResponse(null, new MultiGetResponse.Failure(index, id, exception)); + } else { + GetResponse getResponse = new GetResponse(getResult); + return new MultiGetItemResponse(getResponse, null); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java index 4954406c14db0..1ec54638f9687 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java @@ -8,9 +8,12 @@ package org.elasticsearch.action.ingest; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -21,13 +24,65 @@ import java.util.function.Predicate; import java.util.function.Supplier; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.nullValue; public class SimulatePipelineResponseTests extends AbstractXContentTestCase { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "simulate_pipeline_response", + true, + a -> { + List results = (List) a[0]; + boolean verbose = false; + if (results.size() > 0) { + if (results.get(0) instanceof SimulateDocumentVerboseResult) { + verbose = true; + } + } + return new SimulatePipelineResponse(null, verbose, results); + } + ); + static { + PARSER.declareObjectArray(constructorArg(), (parser, context) -> { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + SimulateDocumentResult result = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + String fieldName = parser.currentName(); + token = parser.nextToken(); + if (token == XContentParser.Token.START_ARRAY) { + if (fieldName.equals(SimulateDocumentVerboseResult.PROCESSOR_RESULT_FIELD)) { + List results = new ArrayList<>(); + while ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) { + results.add(SimulateProcessorResult.fromXContent(parser)); + } + ensureExpectedToken(XContentParser.Token.END_ARRAY, token, parser); + result = new SimulateDocumentVerboseResult(results); + } else { + parser.skipChildren(); + } + } else if (token.equals(XContentParser.Token.START_OBJECT)) { + switch (fieldName) { + case WriteableIngestDocument.DOC_FIELD -> result = new SimulateDocumentBaseResult( + WriteableIngestDocument.INGEST_DOC_PARSER.apply(parser, null).getIngestDocument() + ); + case "error" -> result = new SimulateDocumentBaseResult(ElasticsearchException.fromXContent(parser)); + default -> parser.skipChildren(); + } + } // 
else it is a value skip it + } + assert result != null; + return result; + }, new ParseField(SimulatePipelineResponse.Fields.DOCUMENTS)); + } + public void testSerialization() throws IOException { boolean isVerbose = randomBoolean(); String id = randomBoolean() ? randomAlphaOfLengthBetween(1, 10) : null; @@ -118,7 +173,7 @@ protected SimulatePipelineResponse createTestInstance() { @Override protected SimulatePipelineResponse doParseInstance(XContentParser parser) { - return SimulatePipelineResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index 0288a5b92c772..607d83d4aab31 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -134,8 +134,7 @@ public void testTookWithRealClock() { private void runTestTook(final boolean controlled) { final AtomicLong expected = new AtomicLong(); - var result = new ArraySearchPhaseResults<>(10); - try { + try (var result = new ArraySearchPhaseResults<>(10)) { AbstractSearchAsyncAction action = createAction(new SearchRequest(), result, null, controlled, expected); final long actual = action.buildTookInMillis(); if (controlled) { @@ -145,16 +144,13 @@ private void runTestTook(final boolean controlled) { // with a real clock, the best we can say is that it took as long as we spun for assertThat(actual, greaterThanOrEqualTo(TimeUnit.NANOSECONDS.toMillis(expected.get()))); } - } finally { - result.decRef(); } } public void testBuildShardSearchTransportRequest() { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(randomBoolean()); final AtomicLong expected = new AtomicLong(); - var result = new ArraySearchPhaseResults<>(10); - try { + try (var result = new ArraySearchPhaseResults<>(10)) { AbstractSearchAsyncAction action = createAction(searchRequest, result, null, false, expected); String clusterAlias = randomBoolean() ? 
null : randomAlphaOfLengthBetween(5, 10); SearchShardIterator iterator = new SearchShardIterator( @@ -170,8 +166,6 @@ public void testBuildShardSearchTransportRequest() { assertEquals(2.0f, shardSearchTransportRequest.indexBoost(), 0.0f); assertArrayEquals(new String[] { "name", "name1" }, shardSearchTransportRequest.indices()); assertEquals(clusterAlias, shardSearchTransportRequest.getClusterAlias()); - } finally { - result.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumerTests.java b/server/src/test/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumerTests.java index 33e6096bab763..4a7d0cc8208e2 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumerTests.java @@ -41,8 +41,9 @@ public void testProgressListenerExceptionsAreCaught() throws Exception { ); searchProgressListener.notifyListShards(searchShards, Collections.emptyList(), SearchResponse.Clusters.EMPTY, false, timeProvider); - CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer(searchProgressListener, 10); - try { + try ( + CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer(searchProgressListener, 10) + ) { AtomicInteger nextCounter = new AtomicInteger(0); for (int i = 0; i < 10; i++) { SearchShardTarget searchShardTarget = new SearchShardTarget("node", new ShardId("index", "uuid", i), null); @@ -58,14 +59,16 @@ public void testProgressListenerExceptionsAreCaught() throws Exception { queryPhaseResultConsumer.reduce(); assertEquals(1, searchProgressListener.onFinalReduce.get()); assertEquals(10, nextCounter.get()); - } finally { - queryPhaseResultConsumer.decRef(); } } public void testNullShardResultHandling() throws Exception { - CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer(SearchProgressListener.NOOP, 10); - try { + try ( + CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer( + SearchProgressListener.NOOP, + 10 + ) + ) { AtomicInteger nextCounter = new AtomicInteger(0); for (int i = 0; i < 10; i++) { SearchShardTarget searchShardTarget = new SearchShardTarget("node", new ShardId("index", "uuid", i), null); @@ -79,20 +82,20 @@ public void testNullShardResultHandling() throws Exception { assertEquals(TotalHits.Relation.EQUAL_TO, reducePhase.totalHits().relation); assertFalse(reducePhase.isEmptyResult()); assertEquals(10, nextCounter.get()); - } finally { - queryPhaseResultConsumer.decRef(); } } public void testEmptyResults() throws Exception { - CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer(SearchProgressListener.NOOP, 10); - try { + try ( + CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer( + SearchProgressListener.NOOP, + 10 + ) + ) { var reducePhase = queryPhaseResultConsumer.reduce(); assertEquals(0, reducePhase.totalHits().value); assertEquals(TotalHits.Relation.EQUAL_TO, reducePhase.totalHits().relation); assertTrue(reducePhase.isEmptyResult()); - } finally { - queryPhaseResultConsumer.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java b/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java index 838e13d6026c7..bc31f5f92f9b5 
100644 --- a/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java @@ -23,8 +23,7 @@ public class CountedCollectorTests extends ESTestCase { public void testCollect() throws InterruptedException { - ArraySearchPhaseResults consumer = new ArraySearchPhaseResults<>(randomIntBetween(1, 100)); - try { + try (ArraySearchPhaseResults consumer = new ArraySearchPhaseResults<>(randomIntBetween(1, 100))) { List state = new ArrayList<>(); int numResultsExpected = randomIntBetween(1, consumer.getAtomicArray().length()); MockSearchPhaseContext context = new MockSearchPhaseContext(consumer.getAtomicArray().length()); @@ -93,8 +92,6 @@ public void testCollect() throws InterruptedException { for (int i = numResultsExpected; i < results.length(); i++) { assertNull("index: " + i, results.get(i)); } - } finally { - consumer.decRef(); } } } diff --git a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java index b14d24cf95f62..e9ff8336ef4c9 100644 --- a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java @@ -125,16 +125,17 @@ public void sendExecuteQuery( SearchPhaseController searchPhaseController = searchPhaseController(); MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); mockSearchPhaseContext.searchTransport = searchTransportService; - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.searchRequest, - results.length(), - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.searchRequest, + results.length(), + exc -> {} + ) + ) { DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { @Override public void run() throws IOException { @@ -155,8 +156,6 @@ public void run() throws IOException { assertEquals(84, responseRef.get().get(1).queryResult().topDocs().topDocs.scoreDocs[0].doc); assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); assertEquals(2, mockSearchPhaseContext.numSuccess.get()); - } finally { - consumer.decRef(); } } @@ -211,16 +210,17 @@ public void sendExecuteQuery( SearchPhaseController searchPhaseController = searchPhaseController(); MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); mockSearchPhaseContext.searchTransport = searchTransportService; - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.searchRequest, - results.length(), - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.searchRequest, + results.length(), + exc -> {} + ) + ) { DfsQueryPhase 
phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { @Override public void run() throws IOException { @@ -243,8 +243,6 @@ public void run() throws IOException { assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(new ShardSearchContextId("", 2L))); assertNull(responseRef.get().get(1)); - } finally { - consumer.decRef(); } } @@ -299,16 +297,17 @@ public void sendExecuteQuery( SearchPhaseController searchPhaseController = searchPhaseController(); MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); mockSearchPhaseContext.searchTransport = searchTransportService; - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.searchRequest, - results.length(), - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.searchRequest, + results.length(), + exc -> {} + ) + ) { DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { @Override public void run() throws IOException { @@ -320,8 +319,6 @@ public void run() throws IOException { assertThat(mockSearchPhaseContext.failures, hasSize(1)); assertThat(mockSearchPhaseContext.failures.get(0).getCause(), instanceOf(UncheckedIOException.class)); assertThat(mockSearchPhaseContext.releasedSearchContexts, hasSize(1)); // phase execution will clean up on the contexts - } finally { - consumer.decRef(); } } @@ -339,15 +336,28 @@ public void testRewriteShardSearchRequestWithRank() { QueryBuilder bm25 = new TermQueryBuilder("field", "term"); SearchSourceBuilder ssb = new SearchSourceBuilder().query(bm25) - .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0.0f }, 10, 100, null))) + .knnSearch( + List.of( + new KnnSearchBuilder("vector", new float[] { 0.0f }, 10, 100, null), + new KnnSearchBuilder("vector2", new float[] { 0.0f }, 10, 100, null) + ) + ) .rankBuilder(new TestRankBuilder(100)); SearchRequest sr = new SearchRequest().allowPartialSearchResults(true).source(ssb); ShardSearchRequest ssr = new ShardSearchRequest(null, sr, new ShardId("test", "testuuid", 1), 1, 1, null, 1.0f, 0, null); dqp.rewriteShardSearchRequest(ssr); - KnnScoreDocQueryBuilder ksdqb0 = new KnnScoreDocQueryBuilder(new ScoreDoc[] { new ScoreDoc(1, 3.0f, 1), new ScoreDoc(4, 1.5f, 1) }); - KnnScoreDocQueryBuilder ksdqb1 = new KnnScoreDocQueryBuilder(new ScoreDoc[] { new ScoreDoc(1, 2.0f, 1) }); + KnnScoreDocQueryBuilder ksdqb0 = new KnnScoreDocQueryBuilder( + new ScoreDoc[] { new ScoreDoc(1, 3.0f, 1), new ScoreDoc(4, 1.5f, 1) }, + "vector", + new float[] { 0.0f } + ); + KnnScoreDocQueryBuilder ksdqb1 = new KnnScoreDocQueryBuilder( + new ScoreDoc[] { new ScoreDoc(1, 2.0f, 1) }, + "vector2", + new float[] { 0.0f } + ); assertEquals( List.of(bm25, ksdqb0, ksdqb1), List.of( diff --git a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index 648cb8aa60158..0a98b12444f9c 100644 --- 
a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -42,8 +42,8 @@ public void testCollapseSingleHit() throws IOException { final int numInnerHits = randomIntBetween(1, 5); List collapsedHits = new ArrayList<>(numInnerHits); for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit(innerHitNum, "ID"), new SearchHit(innerHitNum + 1, "ID") }, + SearchHits hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(innerHitNum, "ID"), SearchHit.unpooled(innerHitNum + 1, "ID") }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1.0F ); @@ -98,6 +98,8 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL sections.decRef(); } mSearchResponses.add(new MultiSearchResponse.Item(mockSearchPhaseContext.searchResponse.get(), null)); + // transferring ownership to the multi-search response so no need to release here + mockSearchPhaseContext.searchResponse.set(null); } ActionListener.respondAndRelease( @@ -110,37 +112,43 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL SearchHit hit = new SearchHit(1, "ID"); hit.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(collapseValue))); SearchHits hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { - @Override - public void run() { - var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); - try { - mockSearchPhaseContext.sendSearchResponse(sections, null); - } finally { - sections.decRef(); + try { + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { + @Override + public void run() { + var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); + try { + mockSearchPhaseContext.sendSearchResponse(sections, null); + } finally { + sections.decRef(); + } } - } - }); + }); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - SearchResponse theResponse = mockSearchPhaseContext.searchResponse.get(); - assertNotNull(theResponse); - assertEquals(numInnerHits, theResponse.getHits().getHits()[0].getInnerHits().size()); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + SearchResponse theResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(theResponse); + assertEquals(numInnerHits, theResponse.getHits().getHits()[0].getInnerHits().size()); - for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { - assertSame( - theResponse.getHits().getHits()[0].getInnerHits().get("innerHit" + innerHitNum), - collapsedHits.get(innerHitNum) - ); - } + for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { + assertSame( + theResponse.getHits().getHits()[0].getInnerHits().get("innerHit" + innerHitNum), + collapsedHits.get(innerHitNum) + ); + } - assertTrue(executedMultiSearch.get()); + assertTrue(executedMultiSearch.get()); + } finally { + hits.decRef(); + } } finally { + mockSearchPhaseContext.execute(() -> {}); var resp = mockSearchPhaseContext.searchResponse.get(); if (resp != null) { resp.decRef(); } + } } } @@ -198,22 +206,28 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL SearchHit hit2 = new SearchHit(2, "ID2"); 
hit2.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(collapseValue))); SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { - @Override - public void run() { - var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); - try { - mockSearchPhaseContext.sendSearchResponse(sections, null); - } finally { - sections.decRef(); + try { + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { + @Override + public void run() { + var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); + try { + mockSearchPhaseContext.sendSearchResponse(sections, null); + } finally { + sections.decRef(); + } } - } - }); - phase.run(); - assertThat(mockSearchPhaseContext.phaseFailure.get(), Matchers.instanceOf(RuntimeException.class)); - assertEquals("boom", mockSearchPhaseContext.phaseFailure.get().getMessage()); - assertNotNull(mockSearchPhaseContext.phaseFailure.get()); - assertNull(mockSearchPhaseContext.searchResponse.get()); + }); + phase.run(); + assertThat(mockSearchPhaseContext.phaseFailure.get(), Matchers.instanceOf(RuntimeException.class)); + assertEquals("boom", mockSearchPhaseContext.phaseFailure.get().getMessage()); + assertNotNull(mockSearchPhaseContext.phaseFailure.get()); + assertNull(mockSearchPhaseContext.searchResponse.get()); + } finally { + mockSearchPhaseContext.execute(() -> {}); + hits.decRef(); + collapsedHits.decRef(); + } } public void testSkipPhase() throws IOException { @@ -231,21 +245,26 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL SearchHit hit2 = new SearchHit(2, "ID2"); hit2.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(null))); SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { - @Override - public void run() { - var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); - try { - mockSearchPhaseContext.sendSearchResponse(sections, null); - } finally { - sections.decRef(); + try { + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { + @Override + public void run() { + var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); + try { + mockSearchPhaseContext.sendSearchResponse(sections, null); + } finally { + sections.decRef(); + } } - } - }); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - assertNotNull(mockSearchPhaseContext.searchResponse.get()); + }); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + assertNotNull(mockSearchPhaseContext.searchResponse.get()); + } finally { + hits.decRef(); + } } finally { + mockSearchPhaseContext.execute(() -> {}); var resp = mockSearchPhaseContext.searchResponse.get(); if (resp != null) { resp.decRef(); @@ -328,6 +347,7 @@ public void run() { phase.run(); mockSearchPhaseContext.assertNoFailure(); assertNotNull(mockSearchPhaseContext.searchResponse.get()); + mockSearchPhaseContext.execute(() -> {}); } finally { var resp = mockSearchPhaseContext.searchResponse.get(); if (resp != null) { diff --git 
a/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java index 035d01108d655..95a4efcca5fa2 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java @@ -52,9 +52,11 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL phase.run(); } finally { sections.decRef(); + hits.decRef(); } searchPhaseContext.assertNoFailure(); assertNotNull(searchPhaseContext.searchResponse.get()); + searchPhaseContext.execute(() -> {}); } finally { var resp = searchPhaseContext.searchResponse.get(); if (resp != null) { @@ -126,6 +128,7 @@ void sendExecuteMultiSearch( ), null ); + searchHits.decRef(); } ActionListener.respondAndRelease(listener, new MultiSearchResponse(responses, randomNonNegativeLong())); } @@ -192,6 +195,7 @@ void sendExecuteMultiSearch( phase.run(); } finally { sections.decRef(); + searchHits.decRef(); } assertTrue(requestSent.get()); searchPhaseContext.assertNoFailure(); @@ -220,6 +224,7 @@ void sendExecuteMultiSearch( leftHit1.field("lookup_field_3").getValues(), contains(Map.of("field_a", List.of("a2"), "field_b", List.of("b1", "b2"))) ); + searchPhaseContext.execute(() -> {}); } finally { var resp = searchPhaseContext.searchResponse.get(); if (resp != null) { diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index 4594810da575a..a2c5bed51f5e7 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -50,16 +50,17 @@ public class FetchSearchPhaseTests extends ESTestCase { public void testShortcutQueryAndFetchOptimization() { SearchPhaseController controller = new SearchPhaseController((t, s) -> InternalAggregationTestCase.emptyReduceContextBuilder()); MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); - SearchPhaseResults results = controller.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.getRequest(), - 1, - exc -> {} - ); - try { + try ( + SearchPhaseResults results = controller.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 1, + exc -> {} + ) + ) { boolean hasHits = randomBoolean(); boolean profiled = hasHits && randomBoolean(); final int numHits; @@ -78,8 +79,8 @@ public void testShortcutQueryAndFetchOptimization() { FetchSearchResult fetchResult = new FetchSearchResult(); try { fetchResult.setSearchShardTarget(queryResult.getSearchShardTarget()); - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit(42) }, + SearchHits hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(42) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F ); @@ -126,7 +127,6 @@ public void run() { if (resp != null) { resp.decRef(); } - results.decRef(); } } @@ -144,16 +144,17 @@ private void assertProfiles(boolean profiled, int totalShards, SearchResponse se public void testFetchTwoDocument() { MockSearchPhaseContext mockSearchPhaseContext = new 
MockSearchPhaseContext(2); SearchPhaseController controller = new SearchPhaseController((t, s) -> InternalAggregationTestCase.emptyReduceContextBuilder()); - SearchPhaseResults results = controller.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.getRequest(), - 2, - exc -> {} - ); - try { + try ( + SearchPhaseResults results = controller.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 2, + exc -> {} + ) + ) { int resultSetSize = randomIntBetween(2, 10); boolean profiled = randomBoolean(); @@ -209,16 +210,16 @@ public void sendExecuteFetch( SearchHits hits; if (request.contextId().equals(ctx2)) { fetchResult.setSearchShardTarget(shard2Target); - hits = new SearchHits( - new SearchHit[] { new SearchHit(84) }, + hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(84) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 2.0F ); } else { assertEquals(ctx1, request.contextId()); fetchResult.setSearchShardTarget(shard1Target); - hits = new SearchHits( - new SearchHit[] { new SearchHit(42) }, + hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(42) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F ); @@ -258,23 +259,23 @@ public void run() { if (resp != null) { resp.decRef(); } - results.decRef(); } } public void testFailFetchOneDoc() { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); SearchPhaseController controller = new SearchPhaseController((t, s) -> InternalAggregationTestCase.emptyReduceContextBuilder()); - SearchPhaseResults results = controller.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.getRequest(), - 2, - exc -> {} - ); - try { + try ( + SearchPhaseResults results = controller.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 2, + exc -> {} + ) + ) { int resultSetSize = randomIntBetween(2, 10); boolean profiled = randomBoolean(); @@ -327,8 +328,8 @@ public void sendExecuteFetch( FetchSearchResult fetchResult = new FetchSearchResult(); try { fetchResult.setSearchShardTarget(shard1Target); - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit(84) }, + SearchHits hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(84) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 2.0F ); @@ -386,7 +387,6 @@ public void run() { if (resp != null) { resp.decRef(); } - results.decRef(); } } @@ -397,16 +397,17 @@ public void testFetchDocsConcurrently() throws InterruptedException { boolean profiled = randomBoolean(); SearchPhaseController controller = new SearchPhaseController((t, s) -> InternalAggregationTestCase.emptyReduceContextBuilder()); MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(numHits); - SearchPhaseResults results = controller.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.getRequest(), - numHits, - exc -> {} - ); - try { + try ( + SearchPhaseResults results = 
controller.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + numHits, + exc -> {} + ) + ) { SearchShardTarget[] shardTargets = new SearchShardTarget[numHits]; for (int i = 0; i < numHits; i++) { shardTargets[i] = new SearchShardTarget("node1", new ShardId("test", "na", i), null); @@ -439,8 +440,8 @@ public void sendExecuteFetch( FetchSearchResult fetchResult = new FetchSearchResult(); try { fetchResult.setSearchShardTarget(shardTargets[(int) request.contextId().getId()]); - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit((int) (request.contextId().getId() + 1)) }, + SearchHits hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled((int) (request.contextId().getId() + 1)) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 100F ); @@ -505,23 +506,23 @@ public void run() { if (resp != null) { resp.decRef(); } - results.decRef(); } } public void testExceptionFailsPhase() { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); SearchPhaseController controller = new SearchPhaseController((t, s) -> InternalAggregationTestCase.emptyReduceContextBuilder()); - SearchPhaseResults results = controller.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.getRequest(), - 2, - exc -> {} - ); - try { + try ( + SearchPhaseResults results = controller.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 2, + exc -> {} + ) + ) { int resultSetSize = randomIntBetween(2, 10); boolean profiled = randomBoolean(); @@ -578,16 +579,16 @@ public void sendExecuteFetch( SearchHits hits; if (request.contextId().getId() == 321) { fetchResult.setSearchShardTarget(shard2Target); - hits = new SearchHits( - new SearchHit[] { new SearchHit(84) }, + hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(84) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 2.0F ); } else { fetchResult.setSearchShardTarget(shard1Target); assertEquals(request, 123); - hits = new SearchHits( - new SearchHit[] { new SearchHit(42) }, + hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(42) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F ); @@ -620,23 +621,23 @@ public void run() { if (resp != null) { resp.decRef(); } - results.decRef(); } } public void testCleanupIrrelevantContexts() { // contexts that are not fetched should be cleaned up MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); SearchPhaseController controller = new SearchPhaseController((t, s) -> InternalAggregationTestCase.emptyReduceContextBuilder()); - SearchPhaseResults results = controller.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.getRequest(), - 2, - exc -> {} - ); - try { + try ( + SearchPhaseResults results = controller.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 2, + exc -> {} + ) + ) { int resultSetSize = 1; boolean profiled = randomBoolean(); @@ -689,8 
+690,8 @@ public void sendExecuteFetch( try { if (request.contextId().getId() == 321) { fetchResult.setSearchShardTarget(shard1Target); - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit(84) }, + SearchHits hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(84) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 2.0F ); @@ -740,7 +741,6 @@ public void run() { if (resp != null) { resp.decRef(); } - results.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index 1a510058e3bbd..ed807091ae9a2 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -98,6 +98,8 @@ public void sendSearchResponse(SearchResponseSections internalSearchResponse, At searchContextId ) ); + Releasables.close(releasables); + releasables.clear(); if (existing != null) { existing.decRef(); } @@ -147,12 +149,7 @@ public void addReleasable(Releasable releasable) { @Override public void execute(Runnable command) { - try { - command.run(); - } finally { - Releasables.close(releasables); - releasables.clear(); - } + command.run(); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java index 7e1e7de03e288..91bf1059225d8 100644 --- a/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java @@ -96,7 +96,7 @@ protected OpenPointInTimeRequest mutateInstance(OpenPointInTimeRequest in) throw } public void testUseDefaultConcurrentForOldVersion() throws Exception { - TransportVersion previousVersion = TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_500_020); + TransportVersion previousVersion = TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_9_X); try (BytesStreamOutput output = new BytesStreamOutput()) { TransportVersion version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_0_0, previousVersion); output.setTransportVersion(version); diff --git a/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java b/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java index 6035950ca4635..db32213ff97b7 100644 --- a/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java @@ -103,20 +103,21 @@ public void testProgressListenerExceptionsAreCaught() throws Exception { SearchRequest searchRequest = new SearchRequest("index"); searchRequest.setBatchedReduceSize(2); AtomicReference onPartialMergeFailure = new AtomicReference<>(); - QueryPhaseResultConsumer queryPhaseResultConsumer = new QueryPhaseResultConsumer( - searchRequest, - executor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - searchPhaseController, - () -> false, - searchProgressListener, - 10, - e -> onPartialMergeFailure.accumulateAndGet(e, (prev, curr) -> { - curr.addSuppressed(prev); - return curr; - }) - ); - try { + try ( + QueryPhaseResultConsumer queryPhaseResultConsumer = new QueryPhaseResultConsumer( + searchRequest, + executor, + new 
NoopCircuitBreaker(CircuitBreaker.REQUEST), + searchPhaseController, + () -> false, + searchProgressListener, + 10, + e -> onPartialMergeFailure.accumulateAndGet(e, (prev, curr) -> { + curr.addSuppressed(prev); + return curr; + }) + ) + ) { CountDownLatch partialReduceLatch = new CountDownLatch(10); @@ -137,8 +138,6 @@ public void testProgressListenerExceptionsAreCaught() throws Exception { queryPhaseResultConsumer.reduce(); assertEquals(1, searchProgressListener.onFinalReduce.get()); - } finally { - queryPhaseResultConsumer.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index 30e634314e0ba..cb41a03216dc5 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -199,8 +199,7 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { Map aliasFilters = Collections.singletonMap("_na_", AliasFilter.EMPTY); CountDownLatch awaitInitialRequests = new CountDownLatch(1); AtomicInteger numRequests = new AtomicInteger(0); - var results = new ArraySearchPhaseResults(shardsIter.size()); - try { + try (var results = new ArraySearchPhaseResults(shardsIter.size())) { AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction<>( "test", logger, @@ -271,8 +270,6 @@ public void run() { latch.await(); assertTrue(searchPhaseDidRun.get()); assertEquals(numShards, numRequests.get()); - } finally { - results.decRef(); } } @@ -314,9 +311,8 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, Runtime.getRuntime().availableProcessors())); final CountDownLatch latch = new CountDownLatch(1); final AtomicBoolean latchTriggered = new AtomicBoolean(); - var results = new ArraySearchPhaseResults(shardsIter.size()); final TestSearchResponse testResponse = new TestSearchResponse(); - try { + try (var results = new ArraySearchPhaseResults(shardsIter.size())) { AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction<>( "test", logger, @@ -395,7 +391,6 @@ public void run() { assertThat(runnables, equalTo(Collections.emptyList())); } finally { testResponse.decRef(); - results.decRef(); } } @@ -550,8 +545,7 @@ public void testAllowPartialResults() throws InterruptedException { Map aliasFilters = Collections.singletonMap("_na_", AliasFilter.EMPTY); AtomicInteger numRequests = new AtomicInteger(0); AtomicInteger numFailReplicas = new AtomicInteger(0); - var results = new ArraySearchPhaseResults(shardsIter.size()); - try { + try (var results = new ArraySearchPhaseResults(shardsIter.size())) { AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction<>( "test", logger, @@ -620,8 +614,6 @@ public void run() { assertTrue(searchPhaseDidRun.get()); assertEquals(numShards, numRequests.get()); assertThat(numFailReplicas.get(), greaterThanOrEqualTo(1)); - } finally { - results.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index ac88f999adef6..1f81ad2a02e8c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -558,7 +558,7 @@ 
private static AtomicArray generateFetchResults( List searchHits = new ArrayList<>(); for (ScoreDoc scoreDoc : mergedSearchDocs) { if (scoreDoc.shardIndex == shardIndex) { - searchHits.add(new SearchHit(scoreDoc.doc, "")); + searchHits.add(SearchHit.unpooled(scoreDoc.doc, "")); if (scoreDoc.score > maxScore) { maxScore = scoreDoc.score; } @@ -570,7 +570,7 @@ private static AtomicArray generateFetchResults( for (CompletionSuggestion.Entry.Option option : ((CompletionSuggestion) suggestion).getOptions()) { ScoreDoc doc = option.getDoc(); if (doc.shardIndex == shardIndex) { - searchHits.add(new SearchHit(doc.doc, "")); + searchHits.add(SearchHit.unpooled(doc.doc, "")); if (doc.score > maxScore) { maxScore = doc.score; } @@ -583,7 +583,10 @@ private static AtomicArray generateFetchResults( ProfileResult profileResult = profile && searchHits.size() > 0 ? new ProfileResult("fetch", "fetch", Map.of(), Map.of(), randomNonNegativeLong(), List.of()) : null; - fetchSearchResult.shardResult(new SearchHits(hits, new TotalHits(hits.length, Relation.EQUAL_TO), maxScore), profileResult); + fetchSearchResult.shardResult( + SearchHits.unpooled(hits, new TotalHits(hits.length, Relation.EQUAL_TO), maxScore), + profileResult + ); fetchResults.set(shardIndex, fetchSearchResult); } return fetchResults; @@ -610,16 +613,17 @@ private void consumerTestCase(int numEmptyResponses) throws Exception { SearchRequest request = randomSearchRequest(); request.source(new SearchSourceBuilder().aggregation(new MaxAggregationBuilder("test"))); request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - 3 + numEmptyResponses, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + 3 + numEmptyResponses, + exc -> {} + ) + ) { if (numEmptyResponses == 0) { assertEquals(0, reductions.size()); } @@ -723,8 +727,6 @@ private void consumerTestCase(int numEmptyResponses) throws Exception { assertNull(reduce.sortedTopDocs().sortFields()); assertNull(reduce.sortedTopDocs().collapseField()); assertNull(reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -735,16 +737,17 @@ public void testConsumerConcurrently() throws Exception { SearchRequest request = randomSearchRequest(); request.source(new SearchSourceBuilder().aggregation(new MaxAggregationBuilder("test"))); request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> {} + ) + ) { AtomicInteger max = new AtomicInteger(); Thread[] threads = new Thread[expectedNumResults]; CountDownLatch latch = new CountDownLatch(expectedNumResults); @@ -797,8 +800,6 @@ public void testConsumerConcurrently() throws Exception { assertNull(reduce.sortedTopDocs().sortFields()); assertNull(reduce.sortedTopDocs().collapseField()); 
assertNull(reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -808,16 +809,17 @@ public void testConsumerOnlyAggs() throws Exception { SearchRequest request = randomSearchRequest(); request.source(new SearchSourceBuilder().aggregation(new MaxAggregationBuilder("test")).size(0)); request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> {} + ) + ) { AtomicInteger max = new AtomicInteger(); CountDownLatch latch = new CountDownLatch(expectedNumResults); for (int i = 0; i < expectedNumResults; i++) { @@ -857,8 +859,6 @@ public void testConsumerOnlyAggs() throws Exception { assertNull(reduce.sortedTopDocs().sortFields()); assertNull(reduce.sortedTopDocs().collapseField()); assertNull(reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -870,16 +870,18 @@ public void testConsumerOnlyHits() throws Exception { request.source(new SearchSourceBuilder().size(randomIntBetween(1, 10))); } request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> {} - ); - try { + + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> {} + ) + ) { AtomicInteger max = new AtomicInteger(); CountDownLatch latch = new CountDownLatch(expectedNumResults); for (int i = 0; i < expectedNumResults; i++) { @@ -916,8 +918,6 @@ public void testConsumerOnlyHits() throws Exception { assertNull(reduce.sortedTopDocs().sortFields()); assertNull(reduce.sortedTopDocs().collapseField()); assertNull(reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -937,16 +937,17 @@ public void testReduceTopNWithFromOffset() throws Exception { SearchRequest request = new SearchRequest(); request.source(new SearchSourceBuilder().size(5).from(5)); request.setBatchedReduceSize(randomIntBetween(2, 4)); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - 4, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + 4, + exc -> {} + ) + ) { int score = 100; CountDownLatch latch = new CountDownLatch(4); for (int i = 0; i < 4; i++) { @@ -984,8 +985,6 @@ public void testReduceTopNWithFromOffset() throws Exception { assertEquals(93.0f, scoreDocs[2].score, 0.0f); assertEquals(92.0f, scoreDocs[3].score, 0.0f); assertEquals(91.0f, scoreDocs[4].score, 0.0f); - } finally { - consumer.decRef(); } } @@ -995,16 +994,17 @@ public void testConsumerSortByField() throws Exception { SearchRequest request = 
randomSearchRequest(); int size = randomIntBetween(1, 10); request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> {} + ) + ) { AtomicInteger max = new AtomicInteger(); SortField[] sortFields = { new SortField("field", SortField.Type.INT, true) }; DocValueFormat[] docValueFormats = { DocValueFormat.RAW }; @@ -1040,8 +1040,6 @@ public void testConsumerSortByField() throws Exception { assertEquals(SortField.Type.INT, reduce.sortedTopDocs().sortFields()[0].getType()); assertNull(reduce.sortedTopDocs().collapseField()); assertNull(reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -1051,16 +1049,17 @@ public void testConsumerFieldCollapsing() throws Exception { SearchRequest request = randomSearchRequest(); int size = randomIntBetween(5, 10); request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> {} + ) + ) { SortField[] sortFields = { new SortField("field", SortField.Type.STRING) }; BytesRef a = new BytesRef("a"); BytesRef b = new BytesRef("b"); @@ -1100,8 +1099,6 @@ public void testConsumerFieldCollapsing() throws Exception { assertEquals(SortField.Type.STRING, reduce.sortedTopDocs().sortFields()[0].getType()); assertEquals("field", reduce.sortedTopDocs().collapseField()); assertArrayEquals(collapseValues, reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -1110,16 +1107,17 @@ public void testConsumerSuggestions() throws Exception { int bufferSize = randomIntBetween(2, 200); SearchRequest request = randomSearchRequest(); request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> {} + ) + ) { int maxScoreTerm = -1; int maxScorePhrase = -1; int maxScoreCompletion = -1; @@ -1216,8 +1214,6 @@ public void testConsumerSuggestions() throws Exception { assertNull(reduce.sortedTopDocs().sortFields()); assertNull(reduce.sortedTopDocs().collapseField()); assertNull(reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -1257,16 +1253,17 @@ public void onFinalReduce(List shards, TotalHits totalHits, Interna assertEquals(numReduceListener.incrementAndGet(), reducePhase); } }; - SearchPhaseResults consumer = 
searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - progressListener, - request, - expectedNumResults, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + progressListener, + request, + expectedNumResults, + exc -> {} + ) + ) { AtomicInteger max = new AtomicInteger(); Thread[] threads = new Thread[expectedNumResults]; CountDownLatch latch = new CountDownLatch(expectedNumResults); @@ -1324,8 +1321,6 @@ public void onFinalReduce(List shards, TotalHits totalHits, Interna assertEquals(expectedNumResults, numQueryResultListener.get()); assertEquals(0, numQueryFailureListener.get()); assertEquals(numReduceListener.get(), reduce.numReducePhases()); - } finally { - consumer.decRef(); } } } @@ -1348,16 +1343,17 @@ private void testReduceCase(int numShards, int bufferSize, boolean shouldFail) t if (shouldFailPartial) { circuitBreaker.shouldBreak.set(true); } - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - circuitBreaker, - () -> false, - SearchProgressListener.NOOP, - request, - numShards, - exc -> hasConsumedFailure.set(true) - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + circuitBreaker, + () -> false, + SearchProgressListener.NOOP, + request, + numShards, + exc -> hasConsumedFailure.set(true) + ) + ) { CountDownLatch latch = new CountDownLatch(numShards); Thread[] threads = new Thread[numShards]; for (int i = 0; i < numShards; i++) { @@ -1406,8 +1402,6 @@ private void testReduceCase(int numShards, int bufferSize, boolean shouldFail) t } else { consumer.reduce(); } - } finally { - consumer.decRef(); } assertThat(circuitBreaker.allocated, equalTo(0L)); } @@ -1420,16 +1414,17 @@ public void testFailConsumeAggs() throws Exception { request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")).size(0)); request.setBatchedReduceSize(bufferSize); AtomicBoolean hasConsumedFailure = new AtomicBoolean(); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> hasConsumedFailure.set(true) - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> hasConsumedFailure.set(true) + ) + ) { for (int i = 0; i < expectedNumResults; i++) { final int index = i; QuerySearchResult result = new QuerySearchResult( @@ -1454,8 +1449,6 @@ public void testFailConsumeAggs() throws Exception { } } assertNull(consumer.reduce().aggregations()); - } finally { - consumer.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java index 760070979077d..aef472928923b 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -187,17 +187,18 @@ public void sendExecuteQuery( 
searchRequest.allowPartialSearchResults(false); SearchPhaseController controller = new SearchPhaseController((t, r) -> InternalAggregationTestCase.emptyReduceContextBuilder()); SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); - QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( - searchRequest, - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - controller, - task::isCancelled, - task.getProgressListener(), - shardsIter.size(), - exc -> {} - ); - try { + try ( + QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( + searchRequest, + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + task::isCancelled, + task.getProgressListener(), + shardsIter.size(), + exc -> {} + ) + ) { SearchQueryThenFetchAsyncAction action = new SearchQueryThenFetchAsyncAction( logger, null, @@ -252,8 +253,6 @@ public void run() { assertThat(phase.sortedTopDocs().scoreDocs()[0], instanceOf(FieldDoc.class)); assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields.length, equalTo(1)); assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields[0], equalTo(0)); - } finally { - resultConsumer.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 8c0ffeabf0ea6..6d66a1fcd3847 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -154,7 +154,7 @@ public void testRandomVersionSerialization() throws IOException { // Versions before 8.8 don't support rank searchRequest.source().rankBuilder(null); } - if (version.before(TransportVersions.V_8_500_020) && searchRequest.source() != null) { + if (version.before(TransportVersions.V_8_9_X) && searchRequest.source() != null) { // Versions before 8_500_999 don't support queries searchRequest.source().subSearches(new ArrayList<>()); } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java index e81d7a2246e03..0070d61a2adcb 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java @@ -60,6 +60,7 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.test.InternalAggregationTestCase.emptyReduceContextBuilder; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -394,7 +395,7 @@ public void testMergeCompletionSuggestions() throws InterruptedException { i, Collections.emptyMap() ); - SearchHit hit = new SearchHit(docId); + SearchHit hit = SearchHit.unpooled(docId); ShardId shardId = new ShardId( randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), @@ -480,7 +481,7 @@ public void testMergeCompletionSuggestionsTieBreak() throws InterruptedException 1F, Collections.emptyMap() ); - SearchHit searchHit = new SearchHit(docId); + SearchHit searchHit = SearchHit.unpooled(docId); searchHit.shard( new SearchShardTarget( "node", @@ -658,6 +659,7 @@ public 
void testMergeAggs() throws InterruptedException { ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); + try { addResponse(searchResponseMerger, searchResponse); } finally { @@ -820,9 +822,11 @@ public void testMergeSearchHits() throws InterruptedException { ShardSearchFailure.EMPTY_ARRAY, SearchResponseTests.randomClusters() ); + try { addResponse(searchResponseMerger, searchResponse); } finally { + searchHits.decRef(); searchResponse.decRef(); } } @@ -969,6 +973,7 @@ public void testMergeEmptySearchHitsWithNonEmpty() { try { merger.add(searchResponse); } finally { + searchHits.decRef(); searchResponse.decRef(); } } @@ -1119,6 +1124,443 @@ private static SearchHit[] randomSearchHitArray( return hits; } + /** + * Tests the partial results scenario used by MutableSearchResponse when + * doing cross-cluster search with minimize_roundtrips=true + */ + public void testPartialAggsMixedWithFullResponses() { + String maxAggName = "max123"; + String rangeAggName = "range123"; + + // partial aggs from local cluster (no search hits) + double value = 33.33; + int count = 33; + SearchResponse searchResponsePartialAggs = new SearchResponse( + SearchHits.empty(new TotalHits(0L, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), Float.NaN), + createDeterminsticAggregation(maxAggName, rangeAggName, value, count), + null, + false, + null, + null, + 1, + null, + 2, + 2, + 0, + 33, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + + // full response from remote1 remote cluster + value = 44.44; + count = 44; + String clusterAlias = "remote1"; + int total = 3; + int successful = 2; + int skipped = 1; + Index[] indices = new Index[] { new Index("foo_idx", "1bba9f5b-c5a1-4664-be1b-26be590c1aff") }; + final SearchResponse searchResponseRemote1 = new SearchResponse( + createSimpleDeterministicSearchHits(clusterAlias, indices), + createDeterminsticAggregation(maxAggName, rangeAggName, value, count), + null, + false, + null, + null, + 1, + null, + total, + successful, + skipped, + 44, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + + // full response from remote2 remote cluster + value = 55.55; + count = 55; + clusterAlias = "remote2"; + total = 3; + successful = 2; + skipped = 1; + indices = new Index[] { new Index("foo_idx", "ae024679-097a-4a27-abf8-403f1e9189de") }; + SearchResponse searchResponseRemote2 = new SearchResponse( + createSimpleDeterministicSearchHits(clusterAlias, indices), + createDeterminsticAggregation(maxAggName, rangeAggName, value, count), + null, + false, + null, + null, + 1, + null, + total, + successful, + skipped, + 55, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + try { + SearchResponse.Clusters clusters = SearchResponseTests.createCCSClusterObject( + 3, + 2, + true, + 2, + 1, + 0, + 0, + new ShardSearchFailure[0] + ); + + // merge partial aggs with remote1, check, then merge in remote2, check + try ( + SearchResponseMerger searchResponseMerger = new SearchResponseMerger( + 0, + 10, + 10, + new SearchTimeProvider(0, 0, () -> 0), + emptyReduceContextBuilder( + new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder(maxAggName)) + .addAggregator(new DateRangeAggregationBuilder(rangeAggName)) + ) + ) + ) { + searchResponseMerger.add(searchResponsePartialAggs); + searchResponseMerger.add(searchResponseRemote1); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + SearchHits hits = mergedResponse.getHits(); + assertThat(hits.getTotalHits().value, 
equalTo(2L)); // should be 2 hits from remote1 + SearchHit hit1 = hits.getHits()[0]; + String expectedHit1 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit1.toString(), expectedHit1); + + SearchHit hit2 = hits.getHits()[1]; + String expectedHit2 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit2.toString(), expectedHit2); + + double expectedMaxValue = 44.44; // value from remote1 + long expectedBucketsDocCount = 33 + 44; + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(expectedMaxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(expectedBucketsDocCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } + + searchResponseMerger.add(searchResponseRemote2); + mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + SearchHits hits = mergedResponse.getHits(); + assertThat(hits.getTotalHits().value, equalTo(4L)); // should be 2 hits from remote1, 2 from remote2 + + SearchHit hit1 = hits.getHits()[0]; + String expectedHit1 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit1.toString(), expectedHit1); + + SearchHit hit2 = hits.getHits()[1]; + String expectedHit2 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit2.toString(), expectedHit2); + + SearchHit hit3 = hits.getHits()[2]; + String expectedHit3 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit3.toString(), expectedHit3); + + SearchHit hit4 = hits.getHits()[3]; + String expectedHit4 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit4.toString(), expectedHit4); + + double expectedMaxValue = 55.55; // value from remote2 + long expectedBucketsDocCount = 33 + 44 + 55; + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(expectedMaxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(expectedBucketsDocCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } + } + + // merge remote1 and remote2, no partial aggs, check, then merge in partial aggs from local, check + try ( + SearchResponseMerger searchResponseMerger = new SearchResponseMerger( + 0, + 10, + 10, + new SearchTimeProvider(0, 0, () -> 0), + emptyReduceContextBuilder( + new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder(maxAggName)) + .addAggregator(new DateRangeAggregationBuilder(rangeAggName)) + ) + ) + ) { + searchResponseMerger.add(searchResponseRemote2); + searchResponseMerger.add(searchResponseRemote1); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + SearchHits hits = mergedResponse.getHits(); + SearchHit hit1 = hits.getHits()[0]; + String expectedHit1 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + 
assertEquals(hit1.toString(), expectedHit1); + + SearchHit hit2 = hits.getHits()[1]; + String expectedHit2 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit2.toString(), expectedHit2); + + SearchHit hit3 = hits.getHits()[2]; + String expectedHit3 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit3.toString(), expectedHit3); + + SearchHit hit4 = hits.getHits()[3]; + String expectedHit4 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit4.toString(), expectedHit4); + + double expectedMaxValue = 55.55; // value from remote2 + long expectedBucketsDocCount = 44 + 55; // missing 33 from local partial aggs + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(expectedMaxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(expectedBucketsDocCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } + + searchResponseMerger.add(searchResponsePartialAggs); + mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + SearchHits hits = mergedResponse.getHits(); + assertThat(hits.getTotalHits().value, equalTo(4L)); // should be 2 hits from remote1, 2 from remote2 + + SearchHit hit1 = hits.getHits()[0]; + String expectedHit1 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit1.toString(), expectedHit1); + + SearchHit hit2 = hits.getHits()[1]; + String expectedHit2 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit2.toString(), expectedHit2); + + SearchHit hit3 = hits.getHits()[2]; + String expectedHit3 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit3.toString(), expectedHit3); + + SearchHit hit4 = hits.getHits()[3]; + String expectedHit4 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit4.toString(), expectedHit4); + + double expectedMaxValue = 55.55; // value from remote2 + long expectedBucketsDocCount = 33 + 44 + 55; // contributions from all 3 search responses + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(expectedMaxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(expectedBucketsDocCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } + } + } finally { + searchResponseRemote1.decRef(); + searchResponseRemote2.decRef(); + searchResponsePartialAggs.decRef(); + } + } + + private SearchHits createSimpleDeterministicSearchHits(String clusterAlias, Index[] indices) { + TotalHits totalHits = new TotalHits(2, TotalHits.Relation.EQUAL_TO); + final int numDocs = (int) totalHits.value; + int scoreFactor = 1; + float maxScore = numDocs; + int numFields = 1; + SortField[] sortFields = new SortField[numFields]; + sortFields[0] = SortField.FIELD_SCORE; + PriorityQueue priorityQueue = new 
PriorityQueue<>(new SearchHitComparator(sortFields)); + SearchHit[] hits = deterministicSearchHitArray(numDocs, clusterAlias, indices, maxScore, scoreFactor, sortFields, priorityQueue); + + return SearchHits.unpooled(hits, totalHits, maxScore == Float.NEGATIVE_INFINITY ? Float.NaN : maxScore, sortFields, null, null); + } + + private static InternalAggregations createDeterminsticAggregation(String maxAggName, String rangeAggName, double value, int count) { + Max max = new Max(maxAggName, value, DocValueFormat.RAW, Collections.emptyMap()); + InternalDateRange.Factory factory = new InternalDateRange.Factory(); + InternalDateRange.Bucket bucket = factory.createBucket( + "bucket", + 0D, + 10000D, + count, + InternalAggregations.EMPTY, + false, + DocValueFormat.RAW + ); + + InternalDateRange range = factory.create(rangeAggName, singletonList(bucket), DocValueFormat.RAW, false, emptyMap()); + InternalAggregations aggs = InternalAggregations.from(Arrays.asList(range, max)); + return aggs; + } + + private static SearchHit[] deterministicSearchHitArray( + int numDocs, + String clusterAlias, + Index[] indices, + float maxScore, + int scoreFactor, + SortField[] sortFields, + PriorityQueue priorityQueue + ) { + SearchHit[] hits = new SearchHit[numDocs]; + + int[] sortFieldFactors = new int[sortFields == null ? 0 : sortFields.length]; + for (int j = 0; j < sortFieldFactors.length; j++) { + sortFieldFactors[j] = 1; + } + + for (int j = 0; j < numDocs; j++) { + ShardId shardId = new ShardId(randomFrom(indices), j); + SearchShardTarget shardTarget = new SearchShardTarget("abc123", shardId, clusterAlias); + SearchHit hit = SearchHit.unpooled(j); + + float score = Float.NaN; + if (Float.isNaN(maxScore) == false) { + score = (maxScore - j) * scoreFactor; + hit.score(score); + } + + hit.shard(shardTarget); + if (sortFields != null) { + Object[] rawSortValues = new Object[sortFields.length]; + DocValueFormat[] docValueFormats = new DocValueFormat[sortFields.length]; + for (int k = 0; k < sortFields.length; k++) { + SortField sortField = sortFields[k]; + if (sortField == SortField.FIELD_SCORE) { + hit.score(score); + rawSortValues[k] = score; + } else { + rawSortValues[k] = sortField.getReverse() ? numDocs * sortFieldFactors[k] - j : j; + } + docValueFormats[k] = DocValueFormat.RAW; + } + hit.sortValues(rawSortValues, docValueFormats); + } + hits[j] = hit; + priorityQueue.add(hit); + } + return hits; + } + private static Map randomRealisticIndices(int numIndices, int numClusters) { String[] indicesNames = new String[numIndices]; for (int i = 0; i < numIndices; i++) { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java index ef759279e095f..0d85d020c4180 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -115,25 +115,29 @@ private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... 
sha } if (minimal == false) { SearchHits hits = SearchHitsTests.createTestItem(true, true); - InternalAggregations aggregations = aggregationsTests.createTestInstance(); - Suggest suggest = SuggestTests.createTestItem(); - SearchProfileResults profileResults = SearchProfileResultsTests.createTestItem(); - return new SearchResponse( - hits, - aggregations, - suggest, - timedOut, - terminatedEarly, - profileResults, - numReducePhases, - null, - totalShards, - successfulShards, - skippedShards, - tookInMillis, - shardSearchFailures, - clusters - ); + try { + InternalAggregations aggregations = aggregationsTests.createTestInstance(); + Suggest suggest = SuggestTests.createTestItem(); + SearchProfileResults profileResults = SearchProfileResultsTests.createTestItem(); + return new SearchResponse( + hits, + aggregations, + suggest, + timedOut, + terminatedEarly, + profileResults, + numReducePhases, + null, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardSearchFailures, + clusters + ); + } finally { + hits.decRef(); + } } else { return SearchResponseUtils.emptyWithTotalHits( null, @@ -381,9 +385,10 @@ public void testToXContent() throws IOException { SearchHit hit = new SearchHit(1, "id1"); hit.score(2.0f); SearchHit[] hits = new SearchHit[] { hit }; + var sHits = new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f); { SearchResponse response = new SearchResponse( - new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), + sHits, null, null, false, @@ -425,7 +430,7 @@ public void testToXContent() throws IOException { } { SearchResponse response = new SearchResponse( - new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), + sHits, null, null, false, @@ -475,7 +480,7 @@ public void testToXContent() throws IOException { } { SearchResponse response = new SearchResponse( - new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), + sHits, null, null, false, @@ -617,6 +622,7 @@ public void testToXContent() throws IOException { response.decRef(); } } + sHits.decRef(); } public void testSerialization() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index fb27d824417b1..d04e41c83699d 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; @@ -52,7 +53,7 @@ public void testParentTaskId() throws Exception { Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); - ThreadPool threadPool = new ThreadPool(settings); + ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP); try { TransportService transportService = new TransportService( Settings.EMPTY, @@ -120,7 +121,7 @@ public void testBatchExecute() throws ExecutionException, InterruptedException { 
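The recurring mechanical change in this file and several below is that ThreadPool now takes a MeterRegistry, and tests that do not exercise telemetry pass MeterRegistry.NOOP. A minimal sketch of the test-side lifecycle, with an arbitrary node name and shutdown timeout:

    Settings settings = Settings.builder().put("node.name", "example-test").build();
    ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP);
    try {
        // exercise code that needs a real ThreadPool
    } finally {
        ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); // avoid leaking the pool's threads
    }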
Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); - ThreadPool threadPool = new ThreadPool(settings); + ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP); TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index fea6e39ea881b..e0eed9daa97f6 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -76,6 +76,7 @@ import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.transport.MockTransportService; @@ -509,8 +510,12 @@ public void testCCSRemoteReduceMergeFails() throws Exception { ActionListener.wrap(r -> fail("no response expected"), failure::set), latch ); + + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); TransportSearchAction.ccsRemoteReduce( - new TaskId("n", 1), + task, + parentTaskId, searchRequest, localIndices, remoteIndicesByCluster, @@ -566,6 +571,7 @@ public void testCCSRemoteReduce() throws Exception { service.start(); service.acceptIncomingRequests(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); + // using from: 0 and size: 10 { SearchRequest searchRequest = new SearchRequest(); final CountDownLatch latch = new CountDownLatch(1); @@ -578,8 +584,11 @@ public void testCCSRemoteReduce() throws Exception { }), latch ); + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); TransportSearchAction.ccsRemoteReduce( - new TaskId("n", 1), + task, + parentTaskId, searchRequest, localIndices, remoteIndicesByCluster, @@ -617,6 +626,93 @@ public void testCCSRemoteReduce() throws Exception { searchResponse.decRef(); } } + + // using from: 5 and size: 6 + { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().from(5).size(6); + SearchRequest searchRequest = new SearchRequest(new String[] { "*", "*:*" }, sourceBuilder); + final CountDownLatch latch = new CountDownLatch(1); + SetOnce>> setOnce = new SetOnce<>(); + final SetOnce response = new SetOnce<>(); + LatchedActionListener listener = new LatchedActionListener<>( + ActionTestUtils.assertNoFailureListener(newValue -> { + newValue.incRef(); + response.set(newValue); + }), + latch + ); + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); + TransportSearchAction.ccsRemoteReduce( + task, + parentTaskId, + searchRequest, + localIndices, + remoteIndicesByCluster, + new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()), + timeProvider, + emptyReduceContextBuilder(), + remoteClusterService, + threadPool, + listener, + (r, 
l) -> setOnce.set(Tuple.tuple(r, l)) + ); + if (localIndices == null) { + assertNull(setOnce.get()); + } else { + Tuple> tuple = setOnce.get(); + assertEquals("", tuple.v1().getLocalClusterAlias()); + assertThat(tuple.v2(), instanceOf(TransportSearchAction.CCSActionListener.class)); + resolveWithEmptySearchResponse(tuple); + } + awaitLatch(latch, 5, TimeUnit.SECONDS); + + SearchResponse searchResponse = response.get(); + try { + assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); + assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); + assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); + assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); + assertEquals(totalClusters, searchResponse.getClusters().getTotal()); + assertEquals( + totalClusters, + searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL) + ); + assertEquals(totalClusters == 1 ? 1 : totalClusters + 1, searchResponse.getNumReducePhases()); + } finally { + searchResponse.decRef(); + } + } + + } finally { + for (MockTransportService mockTransportService : mockTransportServices) { + mockTransportService.close(); + } + } + } + + public void testCCSRemoteReduceWhereRemoteClustersFail() throws Exception { + int numClusters = randomIntBetween(1, 10); + DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; + Map remoteIndicesByCluster = new HashMap<>(); + Settings.Builder builder = Settings.builder(); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + Settings settings = builder.build(); + boolean local = randomBoolean(); + OriginalIndices localIndices = local ? 
new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); + try ( + MockTransportService service = MockTransportService.createNewService( + settings, + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool, + null + ) + ) { + service.start(); + service.acceptIncomingRequests(); + RemoteClusterService remoteClusterService = service.getRemoteClusterService(); { SearchRequest searchRequest = new SearchRequest(); searchRequest.preference("index_not_found"); @@ -627,8 +723,12 @@ public void testCCSRemoteReduce() throws Exception { ActionListener.wrap(r -> fail("no response expected"), failure::set), latch ); + + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); TransportSearchAction.ccsRemoteReduce( - new TaskId("n", 1), + task, + parentTaskId, searchRequest, localIndices, remoteIndicesByCluster, @@ -655,6 +755,37 @@ public void testCCSRemoteReduce() throws Exception { assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); } + } finally { + for (MockTransportService mockTransportService : mockTransportServices) { + mockTransportService.close(); + } + } + } + + public void testCCSRemoteReduceWithDisconnectedRemoteClusters() throws Exception { + int numClusters = randomIntBetween(1, 10); + DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; + Map remoteIndicesByCluster = new HashMap<>(); + Settings.Builder builder = Settings.builder(); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + Settings settings = builder.build(); + boolean local = randomBoolean(); + OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; + int totalClusters = numClusters + (local ? 
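As in the earlier tests, ccsRemoteReduce now receives both the SearchTask and its parent TaskId rather than a bare TaskId, letting task-tree bookkeeping and cancellation flow through the remote-reduce step. The tests build the minimal pair like this (the ids and description are arbitrary):

    TaskId parentTaskId = new TaskId("n", 1);
    SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap());
    // both are then passed as the first two arguments:
    // TransportSearchAction.ccsRemoteReduce(task, parentTaskId, searchRequest, ...)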
1 : 0); + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); + try ( + MockTransportService service = MockTransportService.createNewService( + settings, + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool, + null + ) + ) { + service.start(); + service.acceptIncomingRequests(); + RemoteClusterService remoteClusterService = service.getRemoteClusterService(); + int numDisconnectedClusters = randomIntBetween(1, numClusters); Set disconnectedNodes = Sets.newHashSetWithExpectedSize(numDisconnectedClusters); Set disconnectedNodesIndices = Sets.newHashSetWithExpectedSize(numDisconnectedClusters); @@ -687,8 +818,11 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti ActionListener.wrap(r -> fail("no response expected"), failure::set), latch ); + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); TransportSearchAction.ccsRemoteReduce( - new TaskId("n", 1), + task, + parentTaskId, searchRequest, localIndices, remoteIndicesByCluster, @@ -736,8 +870,11 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti if (localIndices != null) { clusterAliases.add(""); } + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); TransportSearchAction.ccsRemoteReduce( - new TaskId("n", 1), + task, + parentTaskId, searchRequest, localIndices, remoteIndicesByCluster, @@ -807,8 +944,11 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti if (localIndices != null) { clusterAliases.add(""); } + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); TransportSearchAction.ccsRemoteReduce( - new TaskId("n", 1), + task, + parentTaskId, searchRequest, localIndices, remoteIndicesByCluster, @@ -1557,7 +1697,7 @@ public void testCCSCompatibilityCheck() throws Exception { ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true); - ThreadPool threadPool = new ThreadPool(settings); + ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP); try { TransportService transportService = MockTransportService.createNewService( Settings.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index 64ab7a9819190..82c204b1d0b88 100644 --- a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; @@ -50,7 +51,10 @@ public class TransportActionFilterChainTests extends ESTestCase { @Before public void init() throws Exception { counter = new AtomicInteger(); - threadPool = new 
ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "TransportActionFilterChainTests").build()); + threadPool = new ThreadPool( + Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "TransportActionFilterChainTests").build(), + MeterRegistry.NOOP + ); } @After diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index 8bda62b91bc7e..86749c26ba730 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.flush.TransportFlushAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActionTestUtils; @@ -20,6 +19,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardRoutingState; @@ -286,9 +286,9 @@ protected void shardExecute( } } - public FlushResponse assertImmediateResponse(String index, TransportFlushAction flushAction) { + public BroadcastResponse assertImmediateResponse(String index, TransportFlushAction flushAction) { Date beginDate = new Date(); - FlushResponse flushResponse = ActionTestUtils.executeBlocking(flushAction, new FlushRequest(index)); + BroadcastResponse flushResponse = ActionTestUtils.executeBlocking(flushAction, new FlushRequest(index)); Date endDate = new Date(); long maxTime = 500; assertThat( diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index 735ae41558240..7ee4d2d6bba9b 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -55,6 +55,7 @@ import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; @@ -516,6 +517,27 @@ public void testToValidateUpsertRequestWithVersion() { assertThat(updateRequest.validate().validationErrors(), contains("can't provide version in upsert request")); } + public void testUpdatingRejectsLongIds() { + String id = randomAlphaOfLength(511); + UpdateRequest request = new UpdateRequest("index", id); + request.doc("{}", XContentType.JSON); + ActionRequestValidationException validate = request.validate(); + assertNull(validate); + + id = randomAlphaOfLength(512); + request = new UpdateRequest("index", id); + request.doc("{}", XContentType.JSON); + validate = 
request.validate(); + assertNull(validate); + + id = randomAlphaOfLength(513); + request = new UpdateRequest("index", id); + request.doc("{}", XContentType.JSON); + validate = request.validate(); + assertThat(validate, notNullValue()); + assertThat(validate.getMessage(), containsString("id [" + id + "] is too long, must be no longer than 512 bytes but was: 513")); + } + public void testValidate() { { UpdateRequest request = new UpdateRequest("index", "id"); diff --git a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java index 5175fee7edceb..97c52ef2edc37 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java +++ b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; @@ -76,7 +77,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); client = buildClient(settings, ACTIONS); } diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index 75439578448a4..5e122c4050b6c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -36,38 +36,38 @@ public class DiskUsageTests extends ESTestCase { public void testDiskUsageCalc() { DiskUsage du = new DiskUsage("node1", "n1", "random", 100, 40); - assertThat(du.getFreeDiskAsPercentage(), equalTo(40.0)); - assertThat(du.getUsedDiskAsPercentage(), equalTo(100.0 - 40.0)); - assertThat(du.getFreeBytes(), equalTo(40L)); - assertThat(du.getUsedBytes(), equalTo(60L)); - assertThat(du.getTotalBytes(), equalTo(100L)); + assertThat(du.freeDiskAsPercentage(), equalTo(40.0)); + assertThat(du.usedDiskAsPercentage(), equalTo(100.0 - 40.0)); + assertThat(du.freeBytes(), equalTo(40L)); + assertThat(du.usedBytes(), equalTo(60L)); + assertThat(du.totalBytes(), equalTo(100L)); DiskUsage du2 = new DiskUsage("node1", "n1", "random", 100, 55); - assertThat(du2.getFreeDiskAsPercentage(), equalTo(55.0)); - assertThat(du2.getUsedDiskAsPercentage(), equalTo(45.0)); - assertThat(du2.getFreeBytes(), equalTo(55L)); - assertThat(du2.getUsedBytes(), equalTo(45L)); - assertThat(du2.getTotalBytes(), equalTo(100L)); + assertThat(du2.freeDiskAsPercentage(), equalTo(55.0)); + assertThat(du2.usedDiskAsPercentage(), equalTo(45.0)); + assertThat(du2.freeBytes(), equalTo(55L)); + assertThat(du2.usedBytes(), equalTo(45L)); + assertThat(du2.totalBytes(), equalTo(100L)); // Test that DiskUsage handles invalid numbers, as reported by some // filesystems (ZFS & NTFS) DiskUsage du3 = new DiskUsage("node1", "n1", "random", 100, 101); - assertThat(du3.getFreeDiskAsPercentage(), equalTo(101.0)); - assertThat(du3.getFreeBytes(), equalTo(101L)); - assertThat(du3.getUsedBytes(), equalTo(-1L)); - assertThat(du3.getTotalBytes(), 
equalTo(100L)); + assertThat(du3.freeDiskAsPercentage(), equalTo(101.0)); + assertThat(du3.freeBytes(), equalTo(101L)); + assertThat(du3.usedBytes(), equalTo(-1L)); + assertThat(du3.totalBytes(), equalTo(100L)); DiskUsage du4 = new DiskUsage("node1", "n1", "random", -1, -1); - assertThat(du4.getFreeDiskAsPercentage(), equalTo(100.0)); - assertThat(du4.getFreeBytes(), equalTo(-1L)); - assertThat(du4.getUsedBytes(), equalTo(0L)); - assertThat(du4.getTotalBytes(), equalTo(-1L)); + assertThat(du4.freeDiskAsPercentage(), equalTo(100.0)); + assertThat(du4.freeBytes(), equalTo(-1L)); + assertThat(du4.usedBytes(), equalTo(0L)); + assertThat(du4.totalBytes(), equalTo(-1L)); DiskUsage du5 = new DiskUsage("node1", "n1", "random", 0, 0); - assertThat(du5.getFreeDiskAsPercentage(), equalTo(100.0)); - assertThat(du5.getFreeBytes(), equalTo(0L)); - assertThat(du5.getUsedBytes(), equalTo(0L)); - assertThat(du5.getTotalBytes(), equalTo(0L)); + assertThat(du5.freeDiskAsPercentage(), equalTo(100.0)); + assertThat(du5.freeBytes(), equalTo(0L)); + assertThat(du5.usedBytes(), equalTo(0L)); + assertThat(du5.totalBytes(), equalTo(0L)); } public void testRandomDiskUsage() { @@ -77,17 +77,17 @@ public void testRandomDiskUsage() { long free = between(Integer.MIN_VALUE, Integer.MAX_VALUE); DiskUsage du = new DiskUsage("random", "random", "random", total, free); if (total == 0) { - assertThat(du.getFreeBytes(), equalTo(free)); - assertThat(du.getTotalBytes(), equalTo(0L)); - assertThat(du.getUsedBytes(), equalTo(-free)); - assertThat(du.getFreeDiskAsPercentage(), equalTo(100.0)); - assertThat(du.getUsedDiskAsPercentage(), equalTo(0.0)); + assertThat(du.freeBytes(), equalTo(free)); + assertThat(du.totalBytes(), equalTo(0L)); + assertThat(du.usedBytes(), equalTo(-free)); + assertThat(du.freeDiskAsPercentage(), equalTo(100.0)); + assertThat(du.usedDiskAsPercentage(), equalTo(0.0)); } else { - assertThat(du.getFreeBytes(), equalTo(free)); - assertThat(du.getTotalBytes(), equalTo(total)); - assertThat(du.getUsedBytes(), equalTo(total - free)); - assertThat(du.getFreeDiskAsPercentage(), equalTo(100.0 * free / total)); - assertThat(du.getUsedDiskAsPercentage(), equalTo(100.0 - (100.0 * free / total))); + assertThat(du.freeBytes(), equalTo(free)); + assertThat(du.totalBytes(), equalTo(total)); + assertThat(du.usedBytes(), equalTo(total - free)); + assertThat(du.freeDiskAsPercentage(), equalTo(100.0 * free / total)); + assertThat(du.usedDiskAsPercentage(), equalTo(100.0 - (100.0 * free / total))); } } } @@ -347,9 +347,9 @@ public void testLeastAndMostAvailableDiskSpaceSomeInvalidValues() { private void assertDiskUsage(DiskUsage usage, FsInfo.Path path) { assertNotNull(usage); assertNotNull(path); - assertEquals(usage.toString(), usage.getPath(), path.getPath()); - assertEquals(usage.toString(), usage.getTotalBytes(), path.getTotal().getBytes()); - assertEquals(usage.toString(), usage.getFreeBytes(), path.getAvailable().getBytes()); + assertEquals(usage.toString(), usage.path(), path.getPath()); + assertEquals(usage.toString(), usage.totalBytes(), path.getTotal().getBytes()); + assertEquals(usage.toString(), usage.freeBytes(), path.getAvailable().getBytes()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java index 395dde29597d3..ae557b1b418da 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ 
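The DiskUsage changes above swap get-prefixed getters for record-style accessors; the derived values follow directly from the total and free bytes. A quick worked example matching the assertions in that test (the node ids and path are arbitrary):

    DiskUsage du = new DiskUsage("node1", "n1", "/data", 100, 40); // totalBytes=100, freeBytes=40
    long used = du.usedBytes();                   // 100 - 40 = 60
    double freePct = du.freeDiskAsPercentage();   // 100.0 * 40 / 100 = 40.0
    double usedPct = du.usedDiskAsPercentage();   // 100.0 - 40.0 = 60.0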
b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -42,6 +42,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptySet; +import static java.util.Collections.singleton; import static java.util.Collections.singletonList; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; @@ -91,6 +92,7 @@ public void testScheduling() { clusterState, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -189,6 +191,7 @@ public void testDescriptionOnMasterIneligibleNodes() { clusterState, emptyList(), emptyList(), + emptySet(), 15L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -207,6 +210,7 @@ public void testDescriptionOnMasterIneligibleNodes() { clusterState, singletonList(otherAddress), emptyList(), + emptySet(), 16L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -226,6 +230,7 @@ public void testDescriptionOnMasterIneligibleNodes() { clusterState, emptyList(), singletonList(otherNode), + emptySet(), 17L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -253,6 +258,7 @@ public void testDescriptionOnUnhealthyNodes() { clusterState, emptyList(), emptyList(), + emptySet(), 15L, electionStrategy, new StatusInfo(UNHEALTHY, "unhealthy-info"), @@ -273,6 +279,7 @@ public void testDescriptionOnUnhealthyNodes() { clusterState, emptyList(), emptyList(), + emptySet(), 15L, electionStrategy, new StatusInfo(UNHEALTHY, "unhealthy-info"), @@ -296,6 +303,7 @@ public void testDescriptionBeforeBootstrapping() { clusterState, emptyList(), emptyList(), + emptySet(), 1L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -317,6 +325,7 @@ public void testDescriptionBeforeBootstrapping() { clusterState, singletonList(otherAddress), emptyList(), + emptySet(), 2L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -340,6 +349,7 @@ public void testDescriptionBeforeBootstrapping() { clusterState, emptyList(), singletonList(otherNode), + emptySet(), 3L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -362,6 +372,7 @@ public void testDescriptionBeforeBootstrapping() { clusterState, emptyList(), emptyList(), + emptySet(), 4L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -414,6 +425,7 @@ public void testDescriptionAfterDetachCluster() { clusterState, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -435,6 +447,7 @@ public void testDescriptionAfterDetachCluster() { clusterState, singletonList(otherAddress), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -458,6 +471,7 @@ public void testDescriptionAfterDetachCluster() { clusterState, emptyList(), singletonList(otherNode), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -481,6 +495,7 @@ public void testDescriptionAfterDetachCluster() { clusterState, emptyList(), singletonList(yetAnotherNode), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -510,6 +525,7 @@ public void testDescriptionAfterBootstrapping() { clusterState, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -531,6 +547,7 @@ public void testDescriptionAfterBootstrapping() { clusterState, 
singletonList(otherAddress), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -554,6 +571,7 @@ public void testDescriptionAfterBootstrapping() { clusterState, emptyList(), singletonList(otherNode), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -577,6 +595,7 @@ public void testDescriptionAfterBootstrapping() { clusterState, emptyList(), singletonList(yetAnotherNode), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -593,12 +612,39 @@ public void testDescriptionAfterBootstrapping() { ) ); + final DiscoveryNode recentMaster = makeDiscoveryNode("recentMaster"); + assertThat( + new ClusterFormationState( + Settings.EMPTY, + clusterState, + emptyList(), + singletonList(yetAnotherNode), + singleton(recentMaster), + 0L, + electionStrategy, + new StatusInfo(HEALTHY, "healthy-info"), + emptyList() + ).getDescription(), + is( + "master not discovered or elected yet, an election requires a node with id [otherNode], " + + "have only discovered non-quorum [" + + noAttr(yetAnotherNode) + + "] who claim current master to be [" + + noAttr(recentMaster) + + "]; " + + "discovery will continue using [] from hosts providers and [" + + noAttr(localNode) + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0" + ) + ); + assertThat( new ClusterFormationState( Settings.EMPTY, state(localNode, "n1", "n2"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -619,6 +665,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, "n1", "n2", "n3"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -639,6 +686,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, "n1", "n2", BOOTSTRAP_PLACEHOLDER_PREFIX + "n3"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -659,6 +707,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, "n1", "n2", "n3", "n4"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -679,6 +728,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, "n1", "n2", "n3", "n4", "n5"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -699,6 +749,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, "n1", "n2", "n3", "n4", BOOTSTRAP_PLACEHOLDER_PREFIX + "n5"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -719,6 +770,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, "n1", "n2", "n3", BOOTSTRAP_PLACEHOLDER_PREFIX + "n4", BOOTSTRAP_PLACEHOLDER_PREFIX + "n5"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -739,6 +791,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, new String[] { "n1" }, new String[] { "n1" }), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -759,6 +812,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, new String[] { "n1" }, new String[] { "n2" }), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -779,6 +833,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, new String[] { "n1" }, new 
String[] { "n2", "n3" }), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -799,6 +854,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, new String[] { "n1" }, new String[] { "n2", "n3", "n4" }), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -841,6 +897,7 @@ public void testDescriptionAfterBootstrapping() { stateWithOtherNodes, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -875,6 +932,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, GatewayMetaState.STALE_STATE_CONFIG_NODE_ID), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -910,6 +968,7 @@ public void testJoinStatusReporting() { clusterState, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -924,6 +983,7 @@ public void testJoinStatusReporting() { clusterState, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -970,6 +1030,7 @@ public void testJoinStatusReporting() { clusterState, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -987,6 +1048,10 @@ public void testClusterFormationStateSerialization() { DiscoveryNodeUtils.create(UUID.randomUUID().toString()), DiscoveryNodeUtils.create(UUID.randomUUID().toString()) ); + Set mastersOfPeers = Set.of( + DiscoveryNodeUtils.create(UUID.randomUUID().toString()), + DiscoveryNodeUtils.create(UUID.randomUUID().toString()) + ); List joinStatuses = List.of( new JoinStatus( DiscoveryNodeUtils.create(UUID.randomUUID().toString()), @@ -1001,6 +1066,7 @@ public void testClusterFormationStateSerialization() { state(localNode, new String[] { "n1" }, new String[] { "n2", "n3", "n4" }), resolvedAddresses, foundPeers, + mastersOfPeers, 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -1035,6 +1101,7 @@ private ClusterFormationState mutateClusterFormationState(ClusterFormationState final DiscoveryNode localNode = originalClusterFormationState.localNode(); List resolvedAddresses = originalClusterFormationState.resolvedAddresses(); List foundPeers = originalClusterFormationState.foundPeers(); + Set mastersOfPeers = originalClusterFormationState.mastersOfPeers(); long currentTerm = originalClusterFormationState.currentTerm(); StatusInfo statusInfo = originalClusterFormationState.statusInfo(); List joinStatuses = originalClusterFormationState.inFlightJoinStatuses(); @@ -1043,13 +1110,14 @@ private ClusterFormationState mutateClusterFormationState(ClusterFormationState originalClusterFormationState.lastAcceptedConfiguration(), originalClusterFormationState.lastCommittedConfiguration() ); - switch (randomIntBetween(1, 5)) { + switch (randomIntBetween(1, 6)) { case 1 -> { return new ClusterFormationState( settings, clusterState, resolvedAddresses, foundPeers, + mastersOfPeers, currentTerm + 1, electionStrategy, statusInfo, @@ -1064,6 +1132,7 @@ private ClusterFormationState mutateClusterFormationState(ClusterFormationState clusterState, resolvedAddresses, newFoundPeers, + mastersOfPeers, currentTerm, electionStrategy, statusInfo, @@ -1085,6 +1154,7 @@ private ClusterFormationState mutateClusterFormationState(ClusterFormationState clusterState, resolvedAddresses, foundPeers, + mastersOfPeers, currentTerm, electionStrategy, statusInfo, @@ -1098,6 +1168,7 @@ 
private ClusterFormationState mutateClusterFormationState(ClusterFormationState clusterState, resolvedAddresses, foundPeers, + mastersOfPeers, currentTerm, electionStrategy, newStatusInfo, @@ -1110,6 +1181,26 @@ private ClusterFormationState mutateClusterFormationState(ClusterFormationState clusterState, resolvedAddresses, foundPeers, + mastersOfPeers, + currentTerm, + electionStrategy, + statusInfo, + joinStatuses + ); + } + case 6 -> { + List newMastersOfPeers = new ArrayList<>(mastersOfPeers); + if (mastersOfPeers.isEmpty() || randomBoolean()) { + newMastersOfPeers.add(DiscoveryNodeUtils.create(UUID.randomUUID().toString())); + } else { + newMastersOfPeers.remove(0); + } + return new ClusterFormationState( + settings, + clusterState, + resolvedAddresses, + foundPeers, + Set.copyOf(newMastersOfPeers), currentTerm, electionStrategy, statusInfo, diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java index 0d93dfb3d7f62..2ad0f18de277f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java @@ -993,6 +993,7 @@ private ClusterFormationFailureHelper.ClusterFormationState getClusterFormationS hasDiscoveredAllNodes ? allMasterEligibleNodes : randomSubsetOf(randomInt(allMasterEligibleNodes.size() - 1), allMasterEligibleNodes), + Collections.emptySet(), randomLong(), hasDiscoveredQuorum, new StatusInfo(randomFrom(StatusInfo.Status.HEALTHY, StatusInfo.Status.UNHEALTHY), randomAlphaOfLength(20)), diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java index 18385b1d7ad44..77c59fe9e8209 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java @@ -43,9 +43,9 @@ import java.util.Map; import java.util.UUID; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -126,9 +126,14 @@ public void testGetHealthIndicatorResultNotGreenVerboseTrue() throws Exception { assertThat(nodeIdToClusterFormationMap.get(node2.getId()), equalTo(node2ClusterFormation)); assertThat(nodeIdToNodeNameMap.get(node1.getId()), equalTo(node1.getName())); assertThat(nodeIdToNodeNameMap.get(node2.getId()), equalTo(node2.getName())); - List diagnosis = result.diagnosisList(); - assertThat(diagnosis.size(), equalTo(1)); - assertThat(diagnosis.get(0), is(StableMasterHealthIndicatorService.CONTACT_SUPPORT)); + assertThat( + result.diagnosisList(), + containsInAnyOrder( + StableMasterHealthIndicatorService.CONTACT_SUPPORT, + StableMasterHealthIndicatorService.TROUBLESHOOT_DISCOVERY, + StableMasterHealthIndicatorService.TROUBLESHOOT_UNSTABLE_CLUSTER + ) + ); } public void testGetHealthIndicatorResultNotGreenVerboseFalse() throws Exception { diff --git 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java index 1c2b35fe050f5..680f538325465 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java @@ -1157,7 +1157,7 @@ public void testAccountForSizeOfAllInitializingShardsDuringAllocation() { @Deprecated private static ClusterInfo createClusterInfo(List diskUsages, Map shardSizes) { - var diskUsage = diskUsages.stream().collect(toMap(DiskUsage::getNodeId, Function.identity())); + var diskUsage = diskUsages.stream().collect(toMap(usage -> usage.nodeId(), Function.identity())); return new ClusterInfo(diskUsage, diskUsage, shardSizes, Map.of(), Map.of(), Map.of()); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 2b182b29c9971..716e7c80a6cde 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -595,8 +595,8 @@ public void testAverageUsage() { usages.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 0)); // 100% used DiskUsage node1Usage = DiskThresholdDecider.averageUsage(rn, usages); - assertThat(node1Usage.getTotalBytes(), equalTo(100L)); - assertThat(node1Usage.getFreeBytes(), equalTo(25L)); + assertThat(node1Usage.totalBytes(), equalTo(100L)); + assertThat(node1Usage.freeBytes(), equalTo(25L)); } private void doTestShardRelocationsTakenIntoAccount(boolean testMaxHeadroom) { @@ -1231,7 +1231,8 @@ private void doTestDiskThresholdWithSnapshotShardSizes(boolean testMaxHeadroom) logger.info("--> simulating snapshot shards size retrieval success"); snapshotShardSizes.put(snapshotShard, shardSizeInBytes); logger.info("--> shard allocation depends on its size"); - shouldAllocate = shardSizeInBytes < usages.get("node1").getFreeBytes(); + DiskUsage usage = usages.get("node1"); + shouldAllocate = shardSizeInBytes < usage.freeBytes(); } else { logger.info("--> simulating snapshot shards size retrieval failure"); snapshotShardSizes.put(snapshotShard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index fb9bde31e8fc4..cb1dddd7c51f3 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Processors; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.Matcher; import java.util.Locale; @@ -501,4 +502,41 @@ public void testNodeProcessorsFloatValidation() { } } + // This test must complete to ensure that our basic infrastructure is working as expected. 
+ // Specifically that ExecutorScalingQueue, which subclasses LinkedTransferQueue, correctly + // tracks tasks submitted to the executor. + public void testBasicTaskExecution() { + final var executorService = EsExecutors.newScaling( + "test", + 0, + between(1, 5), + 60, + TimeUnit.SECONDS, + randomBoolean(), + EsExecutors.daemonThreadFactory("test"), + new ThreadContext(Settings.EMPTY) + ); + try { + final var countDownLatch = new CountDownLatch(between(1, 10)); + class TestTask extends AbstractRunnable { + @Override + protected void doRun() { + countDownLatch.countDown(); + if (countDownLatch.getCount() > 0) { + executorService.execute(TestTask.this); + } + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + } + + executorService.execute(new TestTask()); + safeAwait(countDownLatch); + } finally { + ThreadPool.terminate(executorService, 10, TimeUnit.SECONDS); + } + } } diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/ExecutorScalingQueueTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/ExecutorScalingQueueTests.java new file mode 100644 index 0000000000000..b1e1b9d620d2a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/ExecutorScalingQueueTests.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.util.concurrent; + +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.TimeUnit; + +public class ExecutorScalingQueueTests extends ESTestCase { + + public void testPut() { + var queue = new EsExecutors.ExecutorScalingQueue<>(); + queue.put(new Object()); + assertEquals(queue.size(), 1); + } + + public void testAdd() { + var queue = new EsExecutors.ExecutorScalingQueue<>(); + assertTrue(queue.add(new Object())); + assertEquals(queue.size(), 1); + } + + public void testTimedOffer() { + var queue = new EsExecutors.ExecutorScalingQueue<>(); + assertTrue(queue.offer(new Object(), 60, TimeUnit.SECONDS)); + assertEquals(queue.size(), 1); + } + +} diff --git a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java index ce5841d066d88..209261e8dce70 100644 --- a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java @@ -883,6 +883,46 @@ public boolean innerMatch(LogEvent event) { } } + @TestLogging(reason = "testing logging at WARN level", value = "org.elasticsearch.discovery:WARN") + public void testEventuallyLogsIfReturnedMasterIsUnreachable() { + final DiscoveryNode otherNode = newDiscoveryNode("node-from-hosts-list"); + providedAddresses.add(otherNode.getAddress()); + transportAddressConnector.addReachableNode(otherNode); + + peerFinder.activate(lastAcceptedNodes); + final long endTime = deterministicTaskQueue.getCurrentTimeMillis() + VERBOSITY_INCREASE_TIMEOUT_SETTING.get(Settings.EMPTY).millis() + + PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(Settings.EMPTY).millis(); + + runAllRunnableTasks(); + + assertFoundPeers(otherNode); + final DiscoveryNode unreachableMaster = newDiscoveryNode("unreachable-master"); + 
transportAddressConnector.unreachableAddresses.add(unreachableMaster.getAddress()); + + MockLogAppender.assertThatLogger(() -> { + while (deterministicTaskQueue.getCurrentTimeMillis() <= endTime) { + deterministicTaskQueue.advanceTime(); + runAllRunnableTasks(); + respondToRequests(node -> { + assertThat(node, is(otherNode)); + return new PeersResponse(Optional.of(unreachableMaster), emptyList(), randomNonNegativeLong()); + }); + } + }, + PeerFinder.class, + new MockLogAppender.SeenEventExpectation( + "discovery result", + "org.elasticsearch.discovery.PeerFinder", + Level.WARN, + "address [" + unreachableMaster.getAddress() + "]* [current master according to *node-from-hosts-list*" + ) + ); + + assertFoundPeers(otherNode); + assertThat(peerFinder.discoveredMasterNode, nullValue()); + assertFalse(peerFinder.discoveredMasterTerm.isPresent()); + } + public void testReconnectsToDisconnectedNodes() { final DiscoveryNode otherNode = newDiscoveryNode("original-node"); providedAddresses.add(otherNode.getAddress()); diff --git a/server/src/test/java/org/elasticsearch/http/HttpClientStatsTrackerTests.java b/server/src/test/java/org/elasticsearch/http/HttpClientStatsTrackerTests.java index 99e99540489c5..2dfaaf34bb1f1 100644 --- a/server/src/test/java/org/elasticsearch/http/HttpClientStatsTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/http/HttpClientStatsTrackerTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.node.Node; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -437,7 +438,7 @@ private static class FakeTimeThreadPool extends ThreadPool { private final long absoluteTimeOffset = randomLong(); FakeTimeThreadPool() { - super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build()); + super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); stopCachedTimeThread(); setRandomTime(); } diff --git a/server/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java b/server/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java index 6a87c0f704600..6577148d78c7b 100644 --- a/server/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java +++ b/server/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java @@ -9,8 +9,8 @@ package org.elasticsearch.index.fieldstats; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.rest.RestStatus; @@ -88,7 +88,7 @@ private void assertRequestCacheStats(long expectedHits, long expectedMisses) { } private void refreshIndex() { - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("index").get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("index").get(); assertThat(refreshResponse.getSuccessfulShards(), equalTo(refreshResponse.getSuccessfulShards())); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java 
b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java index e43fa379054bf..c3d2d6a3f194b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java @@ -8,6 +8,11 @@ package org.elasticsearch.index.mapper.vectors; +import org.apache.lucene.queries.function.FunctionQuery; +import org.apache.lucene.queries.function.valuesource.ByteVectorSimilarityFunction; +import org.apache.lucene.queries.function.valuesource.FloatVectorSimilarityFunction; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.KnnByteVectorQuery; import org.apache.lucene.search.KnnFloatVectorQuery; import org.apache.lucene.search.Query; @@ -158,6 +163,64 @@ public void testCreateNestedKnnQuery() { } } + public void testExactKnnQuery() { + int dims = randomIntBetween(2, 2048); + { + DenseVectorFieldType field = new DenseVectorFieldType( + "f", + IndexVersion.current(), + DenseVectorFieldMapper.ElementType.FLOAT, + dims, + true, + VectorSimilarity.COSINE, + Collections.emptyMap() + ); + float[] queryVector = new float[dims]; + for (int i = 0; i < dims; i++) { + queryVector[i] = randomFloat(); + } + Query query = field.createExactKnnQuery(queryVector); + assertTrue(query instanceof BooleanQuery); + BooleanQuery booleanQuery = (BooleanQuery) query; + boolean foundFunction = false; + for (BooleanClause clause : booleanQuery) { + if (clause.getQuery() instanceof FunctionQuery functionQuery) { + foundFunction = true; + assertTrue(functionQuery.getValueSource() instanceof FloatVectorSimilarityFunction); + } + } + assertTrue("Unable to find FloatVectorSimilarityFunction in created BooleanQuery", foundFunction); + } + { + DenseVectorFieldType field = new DenseVectorFieldType( + "f", + IndexVersion.current(), + DenseVectorFieldMapper.ElementType.BYTE, + dims, + true, + VectorSimilarity.COSINE, + Collections.emptyMap() + ); + byte[] queryVector = new byte[dims]; + float[] floatQueryVector = new float[dims]; + for (int i = 0; i < dims; i++) { + queryVector[i] = randomByte(); + floatQueryVector[i] = queryVector[i]; + } + Query query = field.createExactKnnQuery(floatQueryVector); + assertTrue(query instanceof BooleanQuery); + BooleanQuery booleanQuery = (BooleanQuery) query; + boolean foundFunction = false; + for (BooleanClause clause : booleanQuery) { + if (clause.getQuery() instanceof FunctionQuery functionQuery) { + foundFunction = true; + assertTrue(functionQuery.getValueSource() instanceof ByteVectorSimilarityFunction); + } + } + assertTrue("Unable to find FloatVectorSimilarityFunction in created BooleanQuery", foundFunction); + } + } + public void testFloatCreateKnnQuery() { DenseVectorFieldType unindexedField = new DenseVectorFieldType( "f", diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index 516f65111afca..137e0cb348a9c 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -28,8 +28,12 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.search.vectors.ExactKnnQueryBuilder; +import 
diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index 516f65111afca..137e0cb348a9c 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -28,8 +28,12 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.search.vectors.ExactKnnQueryBuilder; +import org.elasticsearch.search.vectors.KnnVectorQueryBuilder; import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.test.TransportVersionUtils; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; import org.hamcrest.Matchers; import java.io.IOException; @@ -48,6 +52,9 @@ public class NestedQueryBuilderTests extends AbstractQueryTestCase<NestedQueryBuilder> { + private static final String VECTOR_FIELD = "vector"; + private static final int VECTOR_DIMENSION = 3; + @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { mapperService.merge( @@ -76,6 +83,27 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws ), MapperService.MergeReason.MAPPING_UPDATE ); + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .startObject("properties") + .startObject(VECTOR_FIELD) + .field("type", "dense_vector") + .field("dims", VECTOR_DIMENSION) + .field("index", true) + .field("similarity", "cosine") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + mapperService.merge( + MapperService.SINGLE_MAPPING_NAME, + new CompressedXContent(Strings.toString(builder)), + MapperService.MergeReason.MAPPING_UPDATE + ); } /** @@ -233,6 +261,27 @@ public void testMustRewrite() throws IOException { assertEquals("Rewrite first", e.getMessage()); } + public void testKnnRewriteForInnerHits() throws IOException { + SearchExecutionContext context = createSearchExecutionContext(); + context.setAllowUnmappedFields(true); + KnnVectorQueryBuilder innerQueryBuilder = new KnnVectorQueryBuilder( + "nested1." + VECTOR_FIELD, + new float[] { 1.0f, 2.0f, 3.0f }, + 1, + null + ); + NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder( + "nested1", + innerQueryBuilder, + RandomPicks.randomFrom(random(), ScoreMode.values()) + ); + InnerHitsRewriteContext rewriteContext = new InnerHitsRewriteContext(context.getParserConfig(), context::nowInMillis); + QueryBuilder queryBuilder = Rewriteable.rewrite(nestedQueryBuilder, rewriteContext, true); + assertTrue(queryBuilder instanceof NestedQueryBuilder); + NestedQueryBuilder rewritten = (NestedQueryBuilder) queryBuilder; + assertTrue(rewritten.query() instanceof ExactKnnQueryBuilder); + } + public void testIgnoreUnmapped() throws IOException { final NestedQueryBuilder queryBuilder = new NestedQueryBuilder("unmapped", new MatchAllQueryBuilder(), ScoreMode.None); queryBuilder.ignoreUnmapped(true);
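testKnnRewriteForInnerHits stops at checking the type of the rewritten inner query. Assuming ExactKnnQueryBuilder exposes the getQuery() and getField() accessors that KnnScoreDocQueryBuilderTests uses later in this diff, the check could be tightened to cover the vector and the field name as well; a hedged sketch:

    // Assumes the getQuery()/getField() accessors mirrored from KnnScoreDocQueryBuilderTests below.
    ExactKnnQueryBuilder exactKnn = (ExactKnnQueryBuilder) rewritten.query();
    assertArrayEquals(new float[] { 1.0f, 2.0f, 3.0f }, exactKnn.getQuery(), 0.0f);
    assertEquals("nested1." + VECTOR_FIELD, exactKnn.getField());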
diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java index ef32360722474..f8162eb987226 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; @@ -16,7 +17,10 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.reindex.BulkByScrollTask.Status; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; @@ -28,10 +32,77 @@ import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.core.TimeValue.timeValueMillis; public class BulkByScrollResponseTests extends AbstractXContentTestCase<BulkByScrollResponse> { + private static final ObjectParser<BulkByScrollResponseBuilder, Void> PARSER = new ObjectParser<>( + "bulk_by_scroll_response", + true, + BulkByScrollResponseBuilder::new + ); + static { + PARSER.declareLong(BulkByScrollResponseBuilder::setTook, new ParseField(BulkByScrollResponse.TOOK_FIELD)); + PARSER.declareBoolean(BulkByScrollResponseBuilder::setTimedOut, new ParseField(BulkByScrollResponse.TIMED_OUT_FIELD)); + PARSER.declareObjectArray( + BulkByScrollResponseBuilder::setFailures, + (p, c) -> parseFailure(p), + new ParseField(BulkByScrollResponse.FAILURES_FIELD) + ); + // the fields of BulkByScrollTask.Status are mixed into this response, so parse them here as well + Status.declareFields(PARSER); + } + + private static Object parseFailure(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + XContentParser.Token token; + String index = null; + String id = null; + Integer status = null; + Integer shardId = null; + String nodeId = null; + ElasticsearchException bulkExc = null; + ElasticsearchException searchExc = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + String name = parser.currentName(); + token = parser.nextToken(); + if (token == XContentParser.Token.START_ARRAY) { + parser.skipChildren(); + } else if (token == XContentParser.Token.START_OBJECT) { + switch (name) { + case ScrollableHitSource.SearchFailure.REASON_FIELD -> searchExc = ElasticsearchException.fromXContent(parser); + case Failure.CAUSE_FIELD -> bulkExc = ElasticsearchException.fromXContent(parser); + default -> parser.skipChildren(); + } + } else if (token == XContentParser.Token.VALUE_STRING) { + switch (name) { + // This field is the same as SearchFailure.index + case Failure.INDEX_FIELD -> index = parser.text(); + case Failure.ID_FIELD -> id = parser.text(); + case ScrollableHitSource.SearchFailure.NODE_FIELD -> nodeId = parser.text(); + } + } else if (token == XContentParser.Token.VALUE_NUMBER) { + switch (name) { + case Failure.STATUS_FIELD -> status = parser.intValue(); + case ScrollableHitSource.SearchFailure.SHARD_FIELD -> shardId = parser.intValue(); + } + } + } + if (bulkExc != null) { + return new Failure(index, id, bulkExc, RestStatus.fromCode(status)); + } else if (searchExc != null) { + if (status == null) { + return new ScrollableHitSource.SearchFailure(searchExc, index, shardId, nodeId); + } else { + return new ScrollableHitSource.SearchFailure(searchExc, index, shardId, nodeId, RestStatus.fromCode(status)); + } + } else { + throw new ElasticsearchParseException("failed to parse failures array. 
At least one of {reason,cause} must be present"); + } + } + private boolean includeUpdated; private boolean includeCreated; private boolean testExceptions = randomBoolean(); @@ -160,7 +231,7 @@ protected BulkByScrollResponse createTestInstance() { @Override protected BulkByScrollResponse doParseInstance(XContentParser parser) throws IOException { - return BulkByScrollResponse.fromXContent(parser); + return PARSER.apply(parser, null).buildResponse(); } @Override diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java index 7c85cba4c34eb..ee35491a74d00 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java @@ -9,7 +9,7 @@ package org.elasticsearch.persistent; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -253,12 +253,12 @@ public void testParamsStatusAndNodeTaskAreDelegated() throws Exception { public void testTaskCancellation() { AtomicLong capturedTaskId = new AtomicLong(); - AtomicReference> capturedListener = new AtomicReference<>(); + AtomicReference> capturedListener = new AtomicReference<>(); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); PersistentTasksService persistentTasksService = new PersistentTasksService(null, null, client) { @Override - void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { + void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { capturedTaskId.set(taskId); capturedListener.set(listener); } @@ -327,8 +327,7 @@ public void sendCompletionRequest( // That should trigger cancellation request assertThat(capturedTaskId.get(), equalTo(localId)); // Notify successful cancellation - capturedListener.get() - .onResponse(new CancelTasksResponse(Collections.emptyList(), Collections.emptyList(), Collections.emptyList())); + capturedListener.get().onResponse(new ListTasksResponse(Collections.emptyList(), Collections.emptyList(), Collections.emptyList())); // finish or fail task if (randomBoolean()) { @@ -349,7 +348,7 @@ public void testTaskLocalAbort() { when(client.settings()).thenReturn(Settings.EMPTY); PersistentTasksService persistentTasksService = new PersistentTasksService(null, null, client) { @Override - void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { + void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { fail("Shouldn't be called during local abort"); } diff --git a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java index 41710d6c1b76c..4125c9bb66b4f 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java @@ -49,6 +49,7 @@ import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; import static org.elasticsearch.ElasticsearchExceptionTests.assertDeepEquals; +import 
static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -420,7 +421,7 @@ public void testErrorToAndFromXContent() throws IOException { ElasticsearchException parsedError; try (XContentParser parser = createParser(xContentType.xContent(), response.content())) { - parsedError = RestResponse.errorFromXContent(parser); + parsedError = errorFromXContent(parser); assertNull(parser.nextToken()); } @@ -436,13 +437,49 @@ public void testNoErrorFromXContent() throws IOException { builder.endObject(); try (XContentParser parser = createParser(builder.contentType().xContent(), BytesReference.bytes(builder))) { - RestResponse.errorFromXContent(parser); + errorFromXContent(parser); } } }); assertEquals("Failed to parse elasticsearch status exception: no exception was found", e.getMessage()); } + private static ElasticsearchStatusException errorFromXContent(XContentParser parser) throws IOException { + XContentParser.Token token = parser.nextToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + + ElasticsearchException exception = null; + RestStatus status = null; + + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } + if (RestResponse.STATUS.equals(currentFieldName)) { + if (token != XContentParser.Token.FIELD_NAME) { + ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, token, parser); + status = RestStatus.fromCode(parser.intValue()); + } + } else { + exception = ElasticsearchException.failureFromXContent(parser); + } + } + + if (exception == null) { + throw new IllegalStateException("Failed to parse elasticsearch status exception: no exception was found"); + } + + ElasticsearchStatusException result = new ElasticsearchStatusException(exception.getMessage(), status, exception.getCause()); + for (String header : exception.getHeaderKeys()) { + result.addHeader(header, exception.getHeader(header)); + } + for (String metadata : exception.getMetadataKeys()) { + result.addMetadata(metadata, exception.getMetadata(metadata)); + } + return result; + } + public void testResponseContentTypeUponException() throws Exception { String mediaType = XContentType.VND_JSON.toParsedMediaType() .responseContentTypeHeader( diff --git a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java index e9bf6f83f5bbc..40bdc3da37242 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.Index; @@ -143,7 +144,7 @@ public static SearchHit createTestItem(XContentType xContentType, boolean withOp @Override protected Writeable.Reader instanceReader() { - return SearchHit::readFrom; + return in -> SearchHit.readFrom(in, randomBoolean()); } @Override @@ -159,16 +160,20 @@ protected SearchHit mutateInstance(SearchHit instance) { public void testFromXContent() 
throws IOException { XContentType xContentType = randomFrom(XContentType.values()).canonical(); SearchHit searchHit = createTestItem(xContentType, true, false); - boolean humanReadable = randomBoolean(); - BytesReference originalBytes = toShuffledXContent(searchHit, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); - SearchHit parsed; - try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { - parser.nextToken(); // jump to first START_OBJECT - parsed = SearchHit.fromXContent(parser); - assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); - assertNull(parser.nextToken()); + try { + boolean humanReadable = randomBoolean(); + BytesReference originalBytes = toShuffledXContent(searchHit, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + SearchHit parsed; + try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + parser.nextToken(); // jump to first START_OBJECT + parsed = SearchHit.fromXContent(parser); + assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); + assertNull(parser.nextToken()); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, humanReadable), xContentType); + } finally { + searchHit.decRef(); } - assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, humanReadable), xContentType); } /** @@ -184,22 +189,26 @@ public void testFromXContent() throws IOException { public void testFromXContentLenientParsing() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); SearchHit searchHit = createTestItem(xContentType, true, true); - BytesReference originalBytes = toXContent(searchHit, xContentType, true); - Predicate pathsToExclude = path -> path.endsWith("highlight") - || path.contains("fields") - || path.contains("_source") - || path.contains("inner_hits") - || path.isEmpty(); - BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, pathsToExclude, random()); - - SearchHit parsed; - try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { - parser.nextToken(); // jump to first START_OBJECT - parsed = SearchHit.fromXContent(parser); - assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); - assertNull(parser.nextToken()); + try { + BytesReference originalBytes = toXContent(searchHit, xContentType, true); + Predicate pathsToExclude = path -> path.endsWith("highlight") + || path.contains("fields") + || path.contains("_source") + || path.contains("inner_hits") + || path.isEmpty(); + BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, pathsToExclude, random()); + + SearchHit parsed; + try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { + parser.nextToken(); // jump to first START_OBJECT + parsed = SearchHit.fromXContent(parser); + assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); + assertNull(parser.nextToken()); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, true), xContentType); + } finally { + searchHit.decRef(); } - assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, true), xContentType); } /** @@ -221,15 +230,19 @@ public void testFromXContentWithoutTypeAndId() throws IOException { public void testToXContent() throws IOException { SearchHit searchHit = new SearchHit(1, "id1"); - searchHit.score(1.5f); - XContentBuilder builder = JsonXContent.contentBuilder(); - searchHit.toXContent(builder, 
ToXContent.EMPTY_PARAMS); - assertEquals(""" - {"_id":"id1","_score":1.5}""", Strings.toString(builder)); + try { + searchHit.score(1.5f); + XContentBuilder builder = JsonXContent.contentBuilder(); + searchHit.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertEquals(""" + {"_id":"id1","_score":1.5}""", Strings.toString(builder)); + } finally { + searchHit.decRef(); + } } public void testRankToXContent() throws IOException { - SearchHit searchHit = new SearchHit(1, "id1"); + SearchHit searchHit = SearchHit.unpooled(1, "id1"); searchHit.setRank(1); XContentBuilder builder = JsonXContent.contentBuilder(); searchHit.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -264,30 +277,42 @@ public void testSerializeShardTarget() throws Exception { hit2.shard(target); SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1f); - - TransportVersion version = TransportVersionUtils.randomVersion(random()); - SearchHits results = copyWriteable(hits, getNamedWriteableRegistry(), SearchHits::new, version); - SearchShardTarget deserializedTarget = results.getAt(0).getShard(); - assertThat(deserializedTarget, equalTo(target)); - assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); - assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); - assertThat(results.getAt(0).getInnerHits().get("1").getAt(1).getShard(), notNullValue()); - assertThat(results.getAt(0).getInnerHits().get("2").getAt(0).getShard(), notNullValue()); - for (SearchHit hit : results) { - assertEquals(clusterAlias, hit.getClusterAlias()); - if (hit.getInnerHits() != null) { - for (SearchHits innerhits : hit.getInnerHits().values()) { - for (SearchHit innerHit : innerhits) { - assertEquals(clusterAlias, innerHit.getClusterAlias()); + try { + TransportVersion version = TransportVersionUtils.randomVersion(random()); + SearchHits results = copyWriteable( + hits, + getNamedWriteableRegistry(), + (StreamInput in) -> SearchHits.readFrom(in, randomBoolean()), + version + ); + try { + SearchShardTarget deserializedTarget = results.getAt(0).getShard(); + assertThat(deserializedTarget, equalTo(target)); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(1).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("2").getAt(0).getShard(), notNullValue()); + for (SearchHit hit : results) { + assertEquals(clusterAlias, hit.getClusterAlias()); + if (hit.getInnerHits() != null) { + for (SearchHits innerhits : hit.getInnerHits().values()) { + for (SearchHit innerHit : innerhits) { + assertEquals(clusterAlias, innerHit.getClusterAlias()); + } + } } } + assertThat(results.getAt(1).getShard(), equalTo(target)); + } finally { + results.decRef(); } + } finally { + hits.decRef(); } - assertThat(results.getAt(1).getShard(), equalTo(target)); } public void testNullSource() { - SearchHit searchHit = new SearchHit(0, "_id"); + SearchHit searchHit = SearchHit.unpooled(0, "_id"); assertThat(searchHit.getSourceAsMap(), nullValue()); assertThat(searchHit.getSourceRef(), nullValue()); @@ -299,7 +324,7 @@ public void testNullSource() { } public void testHasSource() { - SearchHit searchHit = new SearchHit(randomInt()); + SearchHit searchHit = SearchHit.unpooled(randomInt()); 
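// SearchHit.unpooled(...) builds a hit outside the reference counting introduced by this change,
// so no matching decRef() is needed here; pooled hits (for example those read via SearchHit.readFrom)
// must be released, which is why the serialization and parsing tests above call decRef() in finally blocks.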
assertFalse(searchHit.hasSource()); searchHit.sourceRef(new BytesArray("{}")); assertTrue(searchHit.hasSource()); @@ -376,7 +401,7 @@ public void testToXContentEmptyFields() throws IOException { Map fields = new HashMap<>(); fields.put("foo", new DocumentField("foo", Collections.emptyList())); fields.put("bar", new DocumentField("bar", Collections.emptyList())); - SearchHit hit = new SearchHit(0, "_id"); + SearchHit hit = SearchHit.unpooled(0, "_id"); hit.addDocumentFields(fields, Map.of()); { BytesReference originalBytes = toShuffledXContent(hit, XContentType.JSON, ToXContent.EMPTY_PARAMS, randomBoolean()); @@ -389,13 +414,17 @@ public void testToXContentEmptyFields() throws IOException { assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertNull(parser.nextToken()); } - assertThat(parsed.getFields().size(), equalTo(0)); + try { + assertThat(parsed.getFields().size(), equalTo(0)); + } finally { + parsed.decRef(); + } } fields = new HashMap<>(); fields.put("foo", new DocumentField("foo", Collections.emptyList())); fields.put("bar", new DocumentField("bar", Collections.singletonList("value"))); - hit = new SearchHit(0, "_id"); + hit = SearchHit.unpooled(0, "_id"); hit.addDocumentFields(fields, Collections.emptyMap()); { BytesReference originalBytes = toShuffledXContent(hit, XContentType.JSON, ToXContent.EMPTY_PARAMS, randomBoolean()); @@ -412,7 +441,7 @@ public void testToXContentEmptyFields() throws IOException { Map metadata = new HashMap<>(); metadata.put("_routing", new DocumentField("_routing", Collections.emptyList())); - hit = new SearchHit(0, "_id"); + hit = SearchHit.unpooled(0, "_id"); hit.addDocumentFields(fields, Collections.emptyMap()); { BytesReference originalBytes = toShuffledXContent(hit, XContentType.JSON, ToXContent.EMPTY_PARAMS, randomBoolean()); @@ -427,7 +456,13 @@ public void testToXContentEmptyFields() throws IOException { assertThat(parsed.getFields().get("bar").getValues(), equalTo(Collections.singletonList("value"))); assertNull(parsed.getFields().get("_routing")); } + } + @Override + protected void dispose(SearchHit searchHit) { + if (searchHit != null) { + searchHit.decRef(); + } } static Explanation createExplanation(int depth) { diff --git a/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java b/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java index 1e720064dab56..4ca3c5b8dd46e 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java @@ -118,7 +118,7 @@ protected SearchHits mutateInstance(SearchHits instance) { } else { totalHits = null; } - return new SearchHits(instance.getHits(), totalHits, instance.getMaxScore()); + return new SearchHits(instance.asUnpooled().getHits(), totalHits, instance.getMaxScore()); case 2: final float maxScore; if (Float.isNaN(instance.getMaxScore())) { @@ -126,7 +126,7 @@ protected SearchHits mutateInstance(SearchHits instance) { } else { maxScore = Float.NaN; } - return new SearchHits(instance.getHits(), instance.getTotalHits(), maxScore); + return new SearchHits(instance.asUnpooled().getHits(), instance.getTotalHits(), maxScore); case 3: SortField[] sortFields; if (instance.getSortFields() == null) { @@ -135,7 +135,7 @@ protected SearchHits mutateInstance(SearchHits instance) { sortFields = randomBoolean() ? 
createSortFields(instance.getSortFields().length + 1) : null; } return new SearchHits( - instance.getHits(), + instance.asUnpooled().getHits(), instance.getTotalHits(), instance.getMaxScore(), sortFields, @@ -150,7 +150,7 @@ protected SearchHits mutateInstance(SearchHits instance) { collapseField = randomBoolean() ? instance.getCollapseField() + randomAlphaOfLengthBetween(2, 5) : null; } return new SearchHits( - instance.getHits(), + instance.asUnpooled().getHits(), instance.getTotalHits(), instance.getMaxScore(), instance.getSortFields(), @@ -165,7 +165,7 @@ protected SearchHits mutateInstance(SearchHits instance) { collapseValues = randomBoolean() ? createCollapseValues(instance.getCollapseValues().length + 1) : null; } return new SearchHits( - instance.getHits(), + instance.asUnpooled().getHits(), instance.getTotalHits(), instance.getMaxScore(), instance.getSortFields(), @@ -177,6 +177,11 @@ protected SearchHits mutateInstance(SearchHits instance) { } } + @Override + protected void dispose(SearchHits searchHits) { + searchHits.decRef(); + } + @Override protected Predicate getRandomFieldsExcludeFilter() { return path -> (path.isEmpty() @@ -193,7 +198,7 @@ protected String[] getShuffleFieldsExceptions() { @Override protected Writeable.Reader instanceReader() { - return SearchHits::new; + return in -> SearchHits.readFrom(in, randomBoolean()); } @Override @@ -223,15 +228,19 @@ protected SearchHits doParseInstance(XContentParser parser) throws IOException { SearchHits searchHits = SearchHits.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); - return searchHits; + try { + return searchHits.asUnpooled(); + } finally { + searchHits.decRef(); + } } public void testToXContent() throws IOException { - SearchHit[] hits = new SearchHit[] { new SearchHit(1, "id1"), new SearchHit(2, "id2") }; + SearchHit[] hits = new SearchHit[] { SearchHit.unpooled(1, "id1"), SearchHit.unpooled(2, "id2") }; long totalHits = 1000; float maxScore = 1.5f; - SearchHits searchHits = new SearchHits(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), maxScore); + SearchHits searchHits = SearchHits.unpooled(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), maxScore); XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); ChunkedToXContent.wrapAsToXContent(searchHits).toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -251,7 +260,10 @@ public void testToXContent() throws IOException { public void testFromXContentWithShards() throws IOException { for (boolean withExplanation : new boolean[] { true, false }) { - final SearchHit[] hits = new SearchHit[] { new SearchHit(1, "id1"), new SearchHit(2, "id2"), new SearchHit(10, "id10") }; + final SearchHit[] hits = new SearchHit[] { + SearchHit.unpooled(1, "id1"), + SearchHit.unpooled(2, "id2"), + SearchHit.unpooled(10, "id10") }; for (SearchHit hit : hits) { String index = randomAlphaOfLengthBetween(5, 10); @@ -269,7 +281,7 @@ public void testFromXContentWithShards() throws IOException { long totalHits = 1000; float maxScore = 1.5f; - SearchHits searchHits = new SearchHits(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), maxScore); + SearchHits searchHits = SearchHits.unpooled(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), maxScore); XContentType xContentType = randomFrom(XContentType.values()).canonical(); BytesReference bytes = toShuffledXContent( ChunkedToXContent.wrapAsToXContent(searchHits), @@ -304,7 
+316,6 @@ public void testFromXContentWithShards() throws IOException { } } } - } } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index 57974cff0d03c..6a8ac3d1aa876 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -425,6 +425,7 @@ public CheckedBiConsumer getReque "combined_fields", "dis_max", "exists", + "exact_knn", "function_score", "fuzzy", "geo_bounding_box", diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java index bbeeb855f8d18..99be8590e06f2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java @@ -11,10 +11,14 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LogDocMergePolicy; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; @@ -603,7 +607,10 @@ private void testSearchCase( final Consumer verify ) throws IOException { try (Directory directory = newDirectory()) { - try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + IndexWriterConfig config = LuceneTestCase.newIndexWriterConfig(random(), new MockAnalyzer(random())); + // Use LogDocMergePolicy to avoid randomization issues with the doc retrieval order. 
+ config.setMergePolicy(new LogDocMergePolicy()); + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config)) { indexSampleData(dataset, indexWriter, multipleSegments); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTopHitsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTopHitsTests.java index 7d3799b2db35d..1052987aabbdd 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTopHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTopHitsTests.java @@ -159,13 +159,13 @@ private InternalTopHits createTestInstance( Map searchHitFields = new HashMap<>(); scoreDocs[i] = docBuilder.apply(docId, score); - hits[i] = new SearchHit(docId, Integer.toString(i)); + hits[i] = SearchHit.unpooled(docId, Integer.toString(i)); hits[i].addDocumentFields(searchHitFields, Collections.emptyMap()); hits[i].score(score); } int totalHits = between(actualSize, 500000); sort(hits, scoreDocs, comparator); - SearchHits searchHits = new SearchHits(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), maxScore); + SearchHits searchHits = SearchHits.unpooled(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), maxScore); TopDocs topDocs = topDocsBuilder.apply(new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), scoreDocs); // Lucene's TopDocs initializes the maxScore to Float.NaN, if there is no maxScore @@ -276,16 +276,20 @@ protected void assertReduced(InternalTopHits reduced, List inpu new TotalHits(totalHits, relation), maxScore == Float.NEGATIVE_INFINITY ? Float.NaN : maxScore ); - assertEqualsWithErrorMessageFromXContent(expectedHits, actualHits); + try { + assertEqualsWithErrorMessageFromXContent(expectedHits, actualHits); + } finally { + expectedHits.decRef(); + } } public void testGetProperty() { // Create a SearchHit containing: { "foo": 1000.0 } and use it to initialize an InternalTopHits instance. - SearchHit hit = new SearchHit(0); + SearchHit hit = SearchHit.unpooled(0); hit = hit.sourceRef(Source.fromMap(Map.of("foo", 1000.0), XContentType.YAML).internalSourceRef()); hit.sortValues(new Object[] { 10.0 }, new DocValueFormat[] { DocValueFormat.RAW }); hit.score(1.0f); - SearchHits hits = new SearchHits(new SearchHit[] { hit }, null, 0); + SearchHits hits = SearchHits.unpooled(new SearchHit[] { hit }, null, 0); InternalTopHits internalTopHits = new InternalTopHits("test", 0, 0, null, hits, null); assertEquals(internalTopHits, internalTopHits.getProperty(Collections.emptyList())); @@ -301,7 +305,7 @@ public void testGetProperty() { expectThrows(IllegalArgumentException.class, () -> internalTopHits.getProperty(List.of("_sort"))); // Two SearchHit instances are not allowed, only the first will be used without assertion. 
- hits = new SearchHits(new SearchHit[] { hit, hit }, null, 0); + hits = SearchHits.unpooled(new SearchHit[] { hit, hit }, null, 0); InternalTopHits internalTopHits3 = new InternalTopHits("test", 0, 0, null, hits, null); expectThrows(IllegalArgumentException.class, () -> internalTopHits3.getProperty(List.of("foo"))); } @@ -397,7 +401,7 @@ protected InternalTopHits mutateInstance(InternalTopHits instance) { int from = instance.getFrom(); int size = instance.getSize(); TopDocsAndMaxScore topDocs = instance.getTopDocs(); - SearchHits searchHits = instance.getHits(); + SearchHits searchHits = instance.getHits().asUnpooled(); Map metadata = instance.getMetadata(); switch (between(0, 5)) { case 0 -> name += randomAlphaOfLength(5); @@ -415,7 +419,7 @@ protected InternalTopHits mutateInstance(InternalTopHits instance) { searchHits.getTotalHits().value + between(1, 100), randomFrom(TotalHits.Relation.values()) ); - searchHits = new SearchHits(searchHits.getHits(), totalHits, searchHits.getMaxScore() + randomFloat()); + searchHits = SearchHits.unpooled(searchHits.getHits(), totalHits, searchHits.getMaxScore() + randomFloat()); } case 5 -> { if (metadata == null) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java index f242e19012a35..0fe660e56822c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java @@ -194,7 +194,7 @@ public void testSerialization() throws IOException { backwardsCompatible.add(i); } - TDigestState serialized = writeToAndReadFrom(state, TransportVersions.V_8_500_020); + TDigestState serialized = writeToAndReadFrom(state, TransportVersions.V_8_9_X); assertEquals(serialized, state); TDigestState serializedBackwardsCompatible = writeToAndReadFrom(state, TransportVersions.V_8_8_1); diff --git a/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java b/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java index 4c8484be200e5..f8af8a2e3109b 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java @@ -81,6 +81,7 @@ protected SearchHit nextDoc(int doc) { assertThat(hits.length, equalTo(docs.length)); for (int i = 0; i < hits.length; i++) { assertThat(hits[i].docId(), equalTo(docs[i])); + hits[i].decRef(); } reader.close(); diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhaseTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhaseTests.java index e0a26fbc67ffd..a5371e7b0b00a 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhaseTests.java @@ -82,7 +82,7 @@ public void testDocValueFetcher() throws IOException { for (LeafReaderContext context : reader.leaves()) { processor.setNextReader(context); for (int doc = 0; doc < context.reader().maxDoc(); doc++) { - SearchHit searchHit = new SearchHit(doc + context.docBase); + SearchHit searchHit = SearchHit.unpooled(doc + context.docBase); processor.process(new FetchSubPhase.HitContext(searchHit, context, doc, Map.of(), Source.empty(null))); 
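// process(...) resolves the doc values for the current leaf reader and stores them in the hit's
// document fields, which the assertion below verifies; the hit is created unpooled so the test
// does not need to manage its ref count.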
assertNotNull(searchHit.getFields().get("field")); } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourcePhaseTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourcePhaseTests.java index 620706a01c88f..3a4d67ae281f2 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourcePhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourcePhaseTests.java @@ -170,7 +170,7 @@ private HitContext hitExecuteMultiple( when(sec.isSourceEnabled()).thenReturn(sourceBuilder != null); when(fetchContext.getSearchExecutionContext()).thenReturn(sec); - final SearchHit searchHit = new SearchHit(1, null, nestedIdentity); + final SearchHit searchHit = SearchHit.unpooled(1, null, nestedIdentity); // We don't need a real index, just a LeafReaderContext which cannot be mocked. MemoryIndex index = new MemoryIndex(); diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java index 7a1751dbd41fc..be36d72304bd0 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java @@ -1169,7 +1169,7 @@ public void testNestedGrouping() throws IOException { """; var results = fetchFields(mapperService, source, fieldAndFormatList("*", null, false)); - SearchHit searchHit = new SearchHit(0); + SearchHit searchHit = SearchHit.unpooled(0); searchHit.addDocumentFields(results, Map.of()); assertThat(Strings.toString(searchHit), containsString("\"ml.top_classes\":")); } diff --git a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java index ed92bdb1e5919..b16e8f68c7e32 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java @@ -237,7 +237,7 @@ public void testChannelVersion() throws Exception { version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_8_0, TransportVersion.current()); } if (request.source() != null && request.source().subSearches().size() >= 2) { - version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_500_020, TransportVersion.current()); + version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_9_X, TransportVersion.current()); } request = copyWriteable(request, namedWriteableRegistry, ShardSearchRequest::new, version); channelVersion = TransportVersion.min(channelVersion, version); diff --git a/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java b/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java index f42ca49dc14b9..7aece1476a99d 100644 --- a/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java +++ b/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java @@ -67,7 +67,7 @@ public ThrowingQueryBuilder(StreamInput in) throws IOException { this.randomUID = in.readLong(); this.failure = in.readException(); this.shardId = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { this.index = in.readOptionalString(); } else { 
this.index = null; @@ -79,7 +79,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeLong(randomUID); out.writeException(failure); out.writeVInt(shardId); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeOptionalString(index); } } diff --git a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java index 8a82ae8ce7268..42fe65c8d14ef 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java @@ -54,6 +54,9 @@ public static Option createTestItem() { } Option option = new CompletionSuggestion.Entry.Option(docId, text, score, contexts); option.setHit(hit); + if (hit != null) { + hit.decRef(); + } return option; } diff --git a/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java new file mode 100644 index 0000000000000..02093d9fa0e44 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.vectors; + +import org.apache.lucene.queries.function.FunctionQuery; +import org.apache.lucene.queries.function.valuesource.FloatVectorSimilarityFunction; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.AbstractQueryTestCase; +import org.elasticsearch.test.TestGeoShapeFieldMapperPlugin; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Locale; + +public class ExactKnnQueryBuilderTests extends AbstractQueryTestCase { + + private static final String VECTOR_FIELD = "vector"; + private static final int VECTOR_DIMENSION = 3; + + protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject(VECTOR_FIELD) + .field("type", "dense_vector") + .field("dims", VECTOR_DIMENSION) + .field("index", true) + .field("similarity", "cosine") + .endObject() + .endObject() + .endObject(); + mapperService.merge( + MapperService.SINGLE_MAPPING_NAME, + new CompressedXContent(Strings.toString(builder)), + MapperService.MergeReason.MAPPING_UPDATE + ); + } + + @Override + protected Collection> getPlugins() { + return 
List.of(TestGeoShapeFieldMapperPlugin.class); + } + + @Override + protected ExactKnnQueryBuilder doCreateTestQueryBuilder() { + float[] query = new float[VECTOR_DIMENSION]; + for (int i = 0; i < VECTOR_DIMENSION; i++) { + query[i] = randomFloat(); + } + return new ExactKnnQueryBuilder(query, VECTOR_FIELD); + } + + @Override + public void testValidOutput() { + ExactKnnQueryBuilder query = new ExactKnnQueryBuilder(new float[] { 1.0f, 2.0f, 3.0f }, "field"); + String expected = """ + { + "exact_knn" : { + "query" : [ + 1.0, + 2.0, + 3.0 + ], + "field" : "field" + } + }"""; + assertEquals(expected, query.toString()); + } + + @Override + protected void doAssertLuceneQuery(ExactKnnQueryBuilder queryBuilder, Query query, SearchExecutionContext context) throws IOException { + assertTrue(query instanceof BooleanQuery); + BooleanQuery booleanQuery = (BooleanQuery) query; + boolean foundFunction = false; + for (BooleanClause clause : booleanQuery) { + if (clause.getQuery() instanceof FunctionQuery functionQuery) { + foundFunction = true; + assertTrue(functionQuery.getValueSource() instanceof FloatVectorSimilarityFunction); + String description = functionQuery.getValueSource().description().toLowerCase(Locale.ROOT); + if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.NORMALIZED_VECTOR_COSINE)) { + assertTrue(description, description.contains("dot_product")); + } else { + assertTrue(description, description.contains("cosine")); + } + } + } + assertTrue("Unable to find FloatVectorSimilarityFunction in created BooleanQuery", foundFunction); + } + + @Override + public void testUnknownObjectException() { + // Test isn't relevant, since query is never parsed from xContent + } + + @Override + public void testFromXContent() throws IOException { + // Test isn't relevant, since query is never parsed from xContent + } + + @Override + public void testUnknownField() { + // Test isn't relevant, since query is never parsed from xContent + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java index eceafe6d12ac9..67bc6bde9c1af 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java @@ -23,8 +23,10 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.index.query.InnerHitsRewriteContext; import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.AbstractQueryTestCase; @@ -38,6 +40,7 @@ import java.util.List; import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; +import static org.elasticsearch.search.vectors.KnnSearchBuilderTests.randomVector; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -58,12 +61,20 @@ protected KnnScoreDocQueryBuilder doCreateTestQueryBuilder() { for (int doc = 0; doc < numDocs; doc++) { scoreDocs.add(new ScoreDoc(doc, randomFloat())); } - return new KnnScoreDocQueryBuilder(scoreDocs.toArray(new ScoreDoc[0])); + return new 
KnnScoreDocQueryBuilder( + scoreDocs.toArray(new ScoreDoc[0]), + randomBoolean() ? "field" : null, + randomBoolean() ? randomVector(10) : null + ); } @Override public void testValidOutput() { - KnnScoreDocQueryBuilder query = new KnnScoreDocQueryBuilder(new ScoreDoc[] { new ScoreDoc(0, 4.25f), new ScoreDoc(5, 1.6f) }); + KnnScoreDocQueryBuilder query = new KnnScoreDocQueryBuilder( + new ScoreDoc[] { new ScoreDoc(0, 4.25f), new ScoreDoc(5, 1.6f) }, + "field", + new float[] { 1.0f, 2.0f } + ); String expected = """ { "knn_score_doc" : { @@ -76,6 +87,11 @@ public void testValidOutput() { "doc" : 5, "score" : 1.6 } + ], + "field" : "field", + "query" : [ + 1.0, + 2.0 ] } }"""; @@ -144,11 +160,36 @@ public void testMustRewrite() throws IOException { } public void testRewriteToMatchNone() throws IOException { - KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder(new ScoreDoc[0]); - SearchExecutionContext context = createSearchExecutionContext(); + KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder( + new ScoreDoc[0], + randomBoolean() ? "field" : null, + randomBoolean() ? randomVector(10) : null + ); + QueryRewriteContext context = randomBoolean() + ? new InnerHitsRewriteContext(createSearchExecutionContext().getParserConfig(), System::currentTimeMillis) + : createSearchExecutionContext(); assertEquals(new MatchNoneQueryBuilder(), queryBuilder.rewrite(context)); } + public void testRewriteForInnerHits() throws IOException { + SearchExecutionContext context = createSearchExecutionContext(); + InnerHitsRewriteContext innerHitsRewriteContext = new InnerHitsRewriteContext(context.getParserConfig(), System::currentTimeMillis); + KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder( + new ScoreDoc[] { new ScoreDoc(0, 4.25f), new ScoreDoc(5, 1.6f) }, + randomAlphaOfLength(10), + randomVector(10) + ); + queryBuilder.boost(randomFloat()); + queryBuilder.queryName(randomAlphaOfLength(10)); + QueryBuilder rewritten = queryBuilder.rewrite(innerHitsRewriteContext); + assertTrue(rewritten instanceof ExactKnnQueryBuilder); + ExactKnnQueryBuilder exactKnnQueryBuilder = (ExactKnnQueryBuilder) rewritten; + assertEquals(queryBuilder.queryVector(), exactKnnQueryBuilder.getQuery()); + assertEquals(queryBuilder.fieldName(), exactKnnQueryBuilder.getField()); + assertEquals(queryBuilder.boost(), exactKnnQueryBuilder.boost(), 0.0001f); + assertEquals(queryBuilder.queryName(), exactKnnQueryBuilder.queryName()); + } + @Override public void testUnknownObjectException() { // Test isn't relevant, since query is never parsed from xContent @@ -185,7 +226,7 @@ public void testScoreDocQueryWeightCount() throws IOException { } ScoreDoc[] scoreDocs = scoreDocsList.toArray(new ScoreDoc[0]); - KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder(scoreDocs); + KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder(scoreDocs, "field", randomVector(10)); Query query = queryBuilder.doToQuery(context); final Weight w = query.createWeight(searcher, ScoreMode.TOP_SCORES, 1.0f); for (LeafReaderContext leafReaderContext : searcher.getLeafContexts()) { @@ -228,7 +269,7 @@ public void testScoreDocQuery() throws IOException { } ScoreDoc[] scoreDocs = scoreDocsList.toArray(new ScoreDoc[0]); - KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder(scoreDocs); + KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder(scoreDocs, "field", randomVector(10)); final Query query = queryBuilder.doToQuery(context); final Weight w = query.createWeight(searcher, 
ScoreMode.TOP_SCORES, 1.0f); diff --git a/server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java b/server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java deleted file mode 100644 index 793c1f60c38e6..0000000000000 --- a/server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.tasks; - -import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.ChunkedToXContent; -import org.elasticsearch.test.AbstractXContentTestCase; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.net.ConnectException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.function.Predicate; -import java.util.function.Supplier; - -import static org.hamcrest.Matchers.equalTo; - -public class CancelTasksResponseTests extends AbstractXContentTestCase { - - // CancelTasksResponse doesn't directly implement ToXContent because it has multiple XContent representations, so we must wrap here - public record CancelTasksResponseWrapper(CancelTasksResponse in) implements ToXContentObject { - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - return ChunkedToXContent.wrapAsToXContent(in.groupedByNone()).toXContent(builder, params); - } - } - - @Override - protected CancelTasksResponseWrapper createTestInstance() { - List randomTasks = randomTasks(); - return new CancelTasksResponseWrapper(new CancelTasksResponse(randomTasks, Collections.emptyList(), Collections.emptyList())); - } - - private static List randomTasks() { - List randomTasks = new ArrayList<>(); - for (int i = 0; i < randomInt(10); i++) { - randomTasks.add(TaskInfoTests.randomTaskInfo()); - } - return randomTasks; - } - - @Override - protected Predicate getRandomFieldsExcludeFilter() { - // status and headers hold arbitrary content, we can't inject random fields in them - return field -> field.endsWith("status") || field.endsWith("headers"); - } - - @Override - protected void assertEqualInstances(CancelTasksResponseWrapper expectedInstanceWrapper, CancelTasksResponseWrapper newInstanceWrapper) { - final var expectedInstance = expectedInstanceWrapper.in(); - final var newInstance = newInstanceWrapper.in(); - assertNotSame(expectedInstance, newInstance); - assertThat(newInstance.getTasks(), equalTo(expectedInstance.getTasks())); - ListTasksResponseTests.assertOnNodeFailures(newInstance.getNodeFailures(), expectedInstance.getNodeFailures()); - ListTasksResponseTests.assertOnTaskFailures(newInstance.getTaskFailures(), expectedInstance.getTaskFailures()); - } - - @Override - protected 
CancelTasksResponseWrapper doParseInstance(XContentParser parser) { - return new CancelTasksResponseWrapper(CancelTasksResponse.fromXContent(parser)); - } - - @Override - protected boolean supportsUnknownFields() { - return true; - } - - /** - * Test parsing {@link ListTasksResponse} with inner failures as they don't support asserting on xcontent equivalence, given that - * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} - * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. - */ - public void testFromXContentWithFailures() throws IOException { - Supplier instanceSupplier = CancelTasksResponseTests::createTestInstanceWithFailures; - // with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, - // but that does not bother our assertions, as we only want to test that we don't break. - boolean supportsUnknownFields = true; - // exceptions are not of the same type whenever parsed back - boolean assertToXContentEquivalence = false; - AbstractXContentTestCase.testFromXContent( - NUMBER_OF_TEST_RUNS, - instanceSupplier, - supportsUnknownFields, - Strings.EMPTY_ARRAY, - getRandomFieldsExcludeFilter(), - this::createParser, - this::doParseInstance, - this::assertEqualInstances, - assertToXContentEquivalence, - ToXContent.EMPTY_PARAMS - ); - } - - private static CancelTasksResponseWrapper createTestInstanceWithFailures() { - int numNodeFailures = randomIntBetween(0, 3); - List nodeFailures = new ArrayList<>(numNodeFailures); - for (int i = 0; i < numNodeFailures; i++) { - nodeFailures.add(new FailedNodeException(randomAlphaOfLength(5), "error message", new ConnectException())); - } - int numTaskFailures = randomIntBetween(0, 3); - List taskFailures = new ArrayList<>(numTaskFailures); - for (int i = 0; i < numTaskFailures; i++) { - taskFailures.add(new TaskOperationFailure(randomAlphaOfLength(5), randomLong(), new IllegalStateException())); - } - return new CancelTasksResponseWrapper(new CancelTasksResponse(randomTasks(), taskFailures, nodeFailures)); - } - -} diff --git a/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java index 5c355c8009d54..6be78f27135a5 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.telemetry.metric.MeterRegistry; import java.util.concurrent.CountDownLatch; @@ -33,7 +34,7 @@ public void testRejectedExecutionCounter() throws InterruptedException { .put("thread_pool." 
+ threadPoolName + ".queue_size", queueSize) .build(); try { - threadPool = new ThreadPool(nodeSettings); + threadPool = new ThreadPool(nodeSettings, MeterRegistry.NOOP); // these tasks will consume the thread pool causing further // submissions to queue diff --git a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java index 8d7a486ee79f0..9a0c5c4b75d54 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.hamcrest.Matcher; import java.util.HashMap; @@ -424,7 +425,7 @@ public void runScalingThreadPoolTest(final Settings settings, final BiConsumer { + private static final ConstructingObjectParser SINGLE_FEATURE_RESULT_PARSER = + new ConstructingObjectParser<>( + "feature_migration_status", + a -> new SingleFeatureMigrationResult((boolean) a[0], (String) a[1], (Exception) a[2]) + ); + + static { + SINGLE_FEATURE_RESULT_PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SingleFeatureMigrationResult.SUCCESS_FIELD); + SINGLE_FEATURE_RESULT_PARSER.declareString( + ConstructingObjectParser.optionalConstructorArg(), + SingleFeatureMigrationResult.FAILED_INDEX_NAME_FIELD + ); + SINGLE_FEATURE_RESULT_PARSER.declareObject( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + SingleFeatureMigrationResult.EXCEPTION_FIELD + ); + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + FeatureMigrationResults.TYPE, + a -> { + final Map statuses = ((List>) a[0]).stream() + .collect(Collectors.toMap(Tuple::v1, Tuple::v2)); + return new FeatureMigrationResults(statuses); + } + ); + + static { + PARSER.declareNamedObjects( + ConstructingObjectParser.constructorArg(), + (p, c, n) -> new Tuple<>(n, SINGLE_FEATURE_RESULT_PARSER.apply(p, c)), + v -> { + throw new IllegalArgumentException( + "ordered " + FeatureMigrationResults.RESULTS_FIELD.getPreferredName() + " are not supported" + ); + }, + FeatureMigrationResults.RESULTS_FIELD + ); + } + @Override protected FeatureMigrationResults createTestInstance() { return new FeatureMigrationResults(randomMap(0, 10, () -> new Tuple<>(randomAlphaOfLength(5), randomFeatureStatus()))); @@ -60,7 +107,7 @@ protected Writeable.Reader instanceReader() { @Override protected FeatureMigrationResults doParseInstance(XContentParser parser) throws IOException { - return FeatureMigrationResults.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java b/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java index 92b05ec9bf649..0b1ed05039a6d 100644 --- a/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java +++ b/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java @@ -53,7 +53,7 @@ public IndexError(StreamInput in) throws IOException { this.shardIds = in.readBoolean() ? 
in.readIntArray() : null; this.errorType = in.readEnum(ERROR_TYPE.class); this.message = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { this.stallTimeSeconds = in.readVInt(); } else { this.stallTimeSeconds = 0; @@ -69,7 +69,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeEnum(errorType); out.writeString(message); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeVInt(stallTimeSeconds); } } diff --git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java index b12bcd8b55880..a5ace3e357f90 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java @@ -11,9 +11,9 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -466,7 +466,7 @@ private static void createSnapshotThatCanBeUsedDuringRecovery(String indexName) }, 60, TimeUnit.SECONDS); // Force merge to make sure that the resulting snapshot would contain the same index files as the safe commit - ForceMergeResponse forceMergeResponse = client().admin().indices().prepareForceMerge(indexName).setFlush(randomBoolean()).get(); + BroadcastResponse forceMergeResponse = client().admin().indices().prepareForceMerge(indexName).setFlush(randomBoolean()).get(); assertThat(forceMergeResponse.getTotalShards(), equalTo(forceMergeResponse.getSuccessfulShards())); // create repo diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java index 2a1cba66f79f9..b6415eea7db2c 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java @@ -16,7 +16,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.LuceneTestCase; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.network.InetAddresses; @@ -164,7 +164,7 @@ public final void testSnapshotWithLargeSegmentFiles() throws Exception { } flushAndRefresh(index); - 
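// The IndexError hunk above replaces the pre-release constant V_8_500_061 with the
// released V_8_10_X marker. A minimal sketch of that transport-version-gating idiom
// (StallInfo is a hypothetical illustrative type, not part of this change): the writer
// emits the field only on new-enough streams, and the reader substitutes a documented
// default when the peer predates it.
record StallInfo(int stallTimeSeconds) implements org.elasticsearch.common.io.stream.Writeable {
    StallInfo(org.elasticsearch.common.io.stream.StreamInput in) throws java.io.IOException {
        // Mirror the writer exactly: only consume the field when the peer actually wrote it.
        this(in.getTransportVersion().onOrAfter(org.elasticsearch.TransportVersions.V_8_10_X) ? in.readVInt() : 0);
    }

    @Override
    public void writeTo(org.elasticsearch.common.io.stream.StreamOutput out) throws java.io.IOException {
        if (out.getTransportVersion().onOrAfter(org.elasticsearch.TransportVersions.V_8_10_X)) {
            out.writeVInt(stallTimeSeconds);
        }
    }
}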
ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); + BroadcastResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); assertThat(forceMerge.getSuccessfulShards(), equalTo(1)); assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); @@ -191,7 +191,7 @@ public void testRequestStats() throws Exception { } flushAndRefresh(index); - ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); + BroadcastResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); assertThat(forceMerge.getSuccessfulShards(), equalTo(1)); assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); diff --git a/test/framework/src/main/java/org/elasticsearch/search/fetch/HighlighterTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/fetch/HighlighterTestCase.java index a9f1ab7780f7f..526c2104e52ae 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/fetch/HighlighterTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/fetch/HighlighterTestCase.java @@ -73,7 +73,7 @@ protected final Map highlight(MapperService mapperServic Map> storedFields = storedFields(processor.storedFieldsSpec(), doc); Source source = Source.fromBytes(doc.source()); FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext( - new SearchHit(0, "id"), + SearchHit.unpooled(0, "id"), ir.leaves().get(0), 0, storedFields, diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/MetricRecorder.java b/test/framework/src/main/java/org/elasticsearch/telemetry/MetricRecorder.java index aa14d0067b68e..194a1a317742d 100644 --- a/test/framework/src/main/java/org/elasticsearch/telemetry/MetricRecorder.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/MetricRecorder.java @@ -11,6 +11,7 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.telemetry.metric.Instrument; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; @@ -106,6 +107,12 @@ public List getMeasurements(InstrumentType instrumentType, String n return metrics.get(instrumentType).called.getOrDefault(Objects.requireNonNull(name), Collections.emptyList()); } + public ArrayList getRegisteredMetrics(InstrumentType instrumentType) { + ArrayList registeredMetrics = new ArrayList<>(); + metrics.get(instrumentType).instruments.forEach((name, registration) -> { registeredMetrics.add(name); }); + return registeredMetrics; + } + /** * Get the {@link Registration} for a given elasticsearch {@link Instrument}. 
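 * <p>
 * For example, a test can combine this with the {@code getRegisteredMetrics} accessor
 * added above (a sketch; the metric name is hypothetical):
 * <pre>{@code
 * List<String> names = recorder.getRegisteredMetrics(InstrumentType.LONG_COUNTER);
 * assertThat(names, hasItem("es.example.requests.total")); // hypothetical name
 * }</pre>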
*/ diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java b/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java index e237f6c9bbb4b..a4c73634dc102 100644 --- a/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java @@ -15,6 +15,7 @@ import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.telemetry.tracing.Tracer; +import java.util.ArrayList; import java.util.List; /** @@ -41,6 +42,10 @@ public List<Measurement> getLongCounterMeasurement(String name) { return meter.getRecorder().getMeasurements(InstrumentType.LONG_COUNTER, name); } + public List<Measurement> getLongAsyncCounterMeasurement(String name) { + return meter.getRecorder().getMeasurements(InstrumentType.LONG_ASYNC_COUNTER, name); + } + public List<Measurement> getDoubleUpDownCounterMeasurement(String name) { return meter.getRecorder().getMeasurements(InstrumentType.DOUBLE_UP_DOWN_COUNTER, name); } @@ -65,10 +70,18 @@ public List<Measurement> getLongHistogramMeasurement(String name) { return meter.getRecorder().getMeasurements(InstrumentType.LONG_HISTOGRAM, name); } + public void collect() { + meter.getRecorder().collect(); + } + public void resetMeter() { meter.getRecorder().resetCalls(); } + public ArrayList<String> getRegisteredMetrics(InstrumentType instrumentType) { + return meter.getRecorder().getRegisteredMetrics(instrumentType); + } + @Override public TelemetryProvider getTelemetryProvider(Settings settings) { return new TelemetryProvider() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java index 9d151e690b071..5dc707e94bdd7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -117,7 +116,7 @@ protected void cancelSearch(String action) { TaskInfo searchTask = listTasksResponse.getTasks().get(0); logger.info("Cancelling search"); - CancelTasksResponse cancelTasksResponse = clusterAdmin().prepareCancelTasks().setTargetTaskId(searchTask.taskId()).get(); + ListTasksResponse cancelTasksResponse = clusterAdmin().prepareCancelTasks().setTargetTaskId(searchTask.taskId()).get(); assertThat(cancelTasksResponse.getTasks(), hasSize(1)); assertThat(cancelTasksResponse.getTasks().get(0).taskId(), equalTo(searchTask.taskId())); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java index 770c56f9c5952..4df1e745f3bf4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java @@ -32,7 +32,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; public abstract class 
AbstractXContentTestCase extends ESTestCase { - protected static final int NUMBER_OF_TEST_RUNS = 20; + public static final int NUMBER_OF_TEST_RUNS = 20; public static XContentTester xContentTester( CheckedBiFunction createParser, diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 175594ac8210f..65b28ad874431 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -34,10 +34,7 @@ import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.segments.IndexSegments; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; @@ -57,6 +54,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.client.internal.AdminClient; @@ -1478,9 +1476,9 @@ protected final DocWriteResponse index(String index, String id, String source) { * * @see #waitForRelocation() */ - protected final RefreshResponse refresh(String... indices) { + protected final BroadcastResponse refresh(String... indices) { waitForRelocation(); - RefreshResponse actionGet = indicesAdmin().prepareRefresh(indices) + BroadcastResponse actionGet = indicesAdmin().prepareRefresh(indices) .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED) .get(); assertNoFailures(actionGet); @@ -1498,9 +1496,9 @@ protected final void flushAndRefresh(String... indices) { /** * Flush some or all indices in the cluster. */ - protected final FlushResponse flush(String... indices) { + protected final BroadcastResponse flush(String... indices) { waitForRelocation(); - FlushResponse actionGet = indicesAdmin().prepareFlush(indices).get(); + BroadcastResponse actionGet = indicesAdmin().prepareFlush(indices).get(); for (DefaultShardOperationFailedException failure : actionGet.getShardFailures()) { assertThat("unexpected flush failure " + failure.reason(), failure.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); } @@ -1510,9 +1508,9 @@ protected final FlushResponse flush(String... indices) { /** * Waits for all relocations and force merge all indices in the cluster to 1 segment. 
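 * <p>
 * With the broadcast-response migration in this change, callers now assert on the
 * generic shard counters rather than a per-action response type, e.g. (a sketch):
 * <pre>{@code
 * BroadcastResponse merged = forceMerge();
 * assertThat(merged.getFailedShards(), equalTo(0));
 * }</pre>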
*/ - protected ForceMergeResponse forceMerge() { + protected BroadcastResponse forceMerge() { waitForRelocation(); - ForceMergeResponse actionGet = indicesAdmin().prepareForceMerge().setMaxNumSegments(1).get(); + BroadcastResponse actionGet = indicesAdmin().prepareForceMerge().setMaxNumSegments(1).get(); assertNoFailures(actionGet); return actionGet; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index c072f5643a5cd..e3feefea0d8cc 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -122,6 +122,8 @@ import org.elasticsearch.xcontent.XContentParser.Token; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.Matcher; +import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; import org.junit.After; import org.junit.AfterClass; @@ -2109,6 +2111,18 @@ protected static boolean isTurkishLocale() { || Locale.getDefault().getLanguage().equals(new Locale("az").getLanguage()); } + /* + * Assert.assertThat (inherited from LuceneTestCase superclass) has been deprecated. + * So make sure that all assertThat references use the non-deprecated version. + */ + public static void assertThat(T actual, Matcher matcher) { + MatcherAssert.assertThat(actual, matcher); + } + + public static void assertThat(String reason, T actual, Matcher matcher) { + MatcherAssert.assertThat(reason, actual, matcher); + } + public static T fail(Throwable t, String msg, Object... args) { throw new AssertionError(org.elasticsearch.common.Strings.format(msg, args), t); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index a2806663ff321..f9996bfc91204 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -32,8 +32,9 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -72,6 +73,7 @@ import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; @@ -129,7 +131,6 @@ import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; import static org.elasticsearch.cluster.ClusterState.VERSION_INTRODUCING_TRANSPORT_VERSIONS; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.test.rest.TestFeatureService.ALL_FEATURES; import static 
org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; @@ -230,7 +231,22 @@ public enum ProductFeature { private static EnumSet availableFeatures; private static Set nodesVersions; - private static TestFeatureService testFeatureService = ALL_FEATURES; + + private static final TestFeatureService ALL_FEATURES = new TestFeatureService() { + @Override + public boolean clusterHasFeature(String featureId) { + return true; + } + + @Override + public Set getAllSupportedFeatures() { + throw new UnsupportedOperationException( + "Only available to properly initialized TestFeatureService. See ESRestTestCase#createTestFeatureService" + ); + } + }; + + protected static TestFeatureService testFeatureService = ALL_FEATURES; protected static Set getCachedNodesVersions() { assert nodesVersions != null; @@ -1265,15 +1281,33 @@ protected void refreshAllIndices() throws IOException { client().performRequest(refreshRequest); } - protected static RefreshResponse refresh(String index) throws IOException { + protected static BroadcastResponse refresh(String index) throws IOException { return refresh(client(), index); } - protected static RefreshResponse refresh(RestClient client, String index) throws IOException { + private static final ConstructingObjectParser BROADCAST_RESPONSE_PARSER = new ConstructingObjectParser<>( + "broadcast_response", + true, + arg -> { + BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; + return new BroadcastResponse( + response.getTotalShards(), + response.getSuccessfulShards(), + response.getFailedShards(), + Arrays.asList(response.getShardFailures()) + ); + } + ); + + static { + BaseBroadcastResponse.declareBroadcastFields(BROADCAST_RESPONSE_PARSER); + } + + protected static BroadcastResponse refresh(RestClient client, String index) throws IOException { Request refreshRequest = new Request("POST", "/" + index + "/_refresh"); Response response = client.performRequest(refreshRequest); try (var parser = responseAsParser(response)) { - return RefreshResponse.fromXContent(parser); + return BROADCAST_RESPONSE_PARSER.apply(parser, null); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java index a73c43f4fc46a..c8647f4e9c43b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java @@ -9,6 +9,7 @@ package org.elasticsearch.test.rest; import org.elasticsearch.Version; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.features.FeatureData; import org.elasticsearch.features.FeatureSpecification; @@ -22,6 +23,7 @@ class ESRestTestFeatureService implements TestFeatureService { private final Predicate historicalFeaturesPredicate; private final Set clusterStateFeatures; + private final Set allSupportedFeatures; ESRestTestFeatureService( List specs, @@ -31,6 +33,12 @@ class ESRestTestFeatureService implements TestFeatureService { var minNodeVersion = nodeVersions.stream().min(Comparator.naturalOrder()); var featureData = FeatureData.createFromSpecifications(specs); var historicalFeatures = featureData.getHistoricalFeatures(); + Set allHistoricalFeatures = historicalFeatures.lastEntry() == null ? 
Set.of() : historicalFeatures.lastEntry().getValue(); + + this.allSupportedFeatures = Sets.union(clusterStateFeatures, minNodeVersion.>map(v -> { + var historicalFeaturesForVersion = historicalFeatures.floorEntry(v); + return historicalFeaturesForVersion == null ? Set.of() : historicalFeaturesForVersion.getValue(); + }).orElse(allHistoricalFeatures)); this.historicalFeaturesPredicate = minNodeVersion.>map( v -> featureId -> hasHistoricalFeature(historicalFeatures, v, featureId) @@ -43,10 +51,16 @@ private static boolean hasHistoricalFeature(NavigableMap> h return features != null && features.getValue().contains(featureId); } + @Override public boolean clusterHasFeature(String featureId) { if (clusterStateFeatures.contains(featureId)) { return true; } return historicalFeaturesPredicate.test(featureId); } + + @Override + public Set getAllSupportedFeatures() { + return allSupportedFeatures; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java index fcd2f781ec58d..ca7684e60d281 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java @@ -57,10 +57,10 @@ public class RestTestLegacyFeatures implements FeatureSpecification { public static final NodeFeature ML_MEMORY_OVERHEAD_FIXED = new NodeFeature("ml.memory_overhead_fixed"); // QA - rolling upgrade tests + public static final NodeFeature DESIRED_NODE_API_SUPPORTED = new NodeFeature("desired_node_supported"); public static final NodeFeature SECURITY_UPDATE_API_KEY = new NodeFeature("security.api_key_update"); public static final NodeFeature SECURITY_BULK_UPDATE_API_KEY = new NodeFeature("security.api_key_bulk_update"); @UpdateForV9 - public static final NodeFeature WATCHES_VERSION_IN_META = new NodeFeature("watcher.version_in_meta"); @UpdateForV9 public static final NodeFeature SECURITY_ROLE_DESCRIPTORS_OPTIONAL = new NodeFeature("security.role_descriptors_optional"); @@ -76,6 +76,27 @@ public class RestTestLegacyFeatures implements FeatureSpecification { @UpdateForV9 public static final NodeFeature ML_ANALYTICS_MAPPINGS = new NodeFeature("ml.analytics_mappings"); + public static final NodeFeature TSDB_NEW_INDEX_FORMAT = new NodeFeature("indices.tsdb_new_format"); + public static final NodeFeature TSDB_GENERALLY_AVAILABLE = new NodeFeature("indices.tsdb_supported"); + + /* + * A composable index template with no template defined in the body is mistakenly always assumed to not be a time series template. + * Fixed in #98840 + */ + public static final NodeFeature TSDB_EMPTY_TEMPLATE_FIXED = new NodeFeature("indices.tsdb_empty_composable_template_fixed"); + public static final NodeFeature SYNTHETIC_SOURCE_SUPPORTED = new NodeFeature("indices.synthetic_source"); + + public static final NodeFeature DESIRED_BALANCED_ALLOCATOR_SUPPORTED = new NodeFeature("allocator.desired_balance"); + + /* + * Cancel shard allocation command is broken for initial desired balance versions + * and might allocate shard on the node where it is not supposed to be. This + * is fixed by https://github.com/elastic/elasticsearch/pull/93635. 
+ */ + public static final NodeFeature DESIRED_BALANCED_ALLOCATOR_FIXED = new NodeFeature("allocator.desired_balance_fixed"); + public static final NodeFeature INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED = new NodeFeature("settings.indexing_slowlog_level_removed"); + public static final NodeFeature DEPRECATION_WARNINGS_LEAK_FIXED = new NodeFeature("deprecation_warnings_leak_fixed"); + // YAML public static final NodeFeature REST_ELASTIC_PRODUCT_HEADER_PRESENT = new NodeFeature("action.rest.product_header_present"); @@ -103,7 +124,16 @@ public Map getHistoricalFeatures() { entry(TRANSFORM_NEW_API_ENDPOINT, Version.V_7_5_0), entry(ML_INDICES_HIDDEN, Version.V_7_7_0), entry(ML_ANALYTICS_MAPPINGS, Version.V_7_3_0), - entry(REST_ELASTIC_PRODUCT_HEADER_PRESENT, Version.V_8_0_1) + entry(REST_ELASTIC_PRODUCT_HEADER_PRESENT, Version.V_8_0_1), + entry(DESIRED_NODE_API_SUPPORTED, Version.V_8_1_0), + entry(TSDB_NEW_INDEX_FORMAT, Version.V_8_2_0), + entry(SYNTHETIC_SOURCE_SUPPORTED, Version.V_8_4_0), + entry(DESIRED_BALANCED_ALLOCATOR_SUPPORTED, Version.V_8_6_0), + entry(DESIRED_BALANCED_ALLOCATOR_FIXED, Version.V_8_7_1), + entry(TSDB_GENERALLY_AVAILABLE, Version.V_8_7_0), + entry(TSDB_EMPTY_TEMPLATE_FIXED, Version.V_8_11_0), + entry(INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED, Version.V_8_0_0), + entry(DEPRECATION_WARNINGS_LEAK_FIXED, Version.V_7_17_9) ); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java index 9de1fcf631520..332a00ce895a0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java @@ -8,8 +8,10 @@ package org.elasticsearch.test.rest; +import java.util.Set; + public interface TestFeatureService { boolean clusterHasFeature(String featureId); - TestFeatureService ALL_FEATURES = ignored -> true; + Set getAllSupportedFeatures(); } diff --git a/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java b/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java index e8a853989e8e5..ce8e3a2574f3e 100644 --- a/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java +++ b/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Releasable; import org.elasticsearch.node.Node; +import org.elasticsearch.telemetry.metric.MeterRegistry; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; @@ -30,7 +31,7 @@ public TestThreadPool(String name, ExecutorBuilder... customBuilders) { } public TestThreadPool(String name, Settings settings, ExecutorBuilder... 
customBuilders) { - super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), customBuilders); + super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), MeterRegistry.NOOP, customBuilders); } @Override diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java index 6e8397c816b3b..94b80fcc3fab3 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -29,6 +29,11 @@ private static class MockTestFeatureService implements TestFeatureService { public boolean clusterHasFeature(String featureId) { return true; } + + @Override + public Set getAllSupportedFeatures() { + return Set.of(); + } } public void testHeadersSupportStashedValueReplacement() throws IOException { diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java index b0e8b8ae05b51..61917220f10d1 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java @@ -83,7 +83,7 @@ protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBu public BoxplotAggregationBuilder(StreamInput in) throws IOException { super(in); compression = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { executionHint = in.readOptionalWriteable(TDigestExecutionHint::readFrom); } else { executionHint = TDigestExecutionHint.HIGH_ACCURACY; @@ -98,7 +98,7 @@ public Set metricNames() { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeDouble(compression); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(executionHint); } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java index f3af195bc6fa1..dc4b096f3a08e 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java @@ -63,7 +63,7 @@ public InternalResetTrackingRate(StreamInput in) throws IOException { this.startTime = in.readLong(); this.endTime = in.readLong(); this.resetCompensation = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { this.rateUnit = Rounding.DateTimeUnit.resolve(in.readByte()); } else { this.rateUnit = Rounding.DateTimeUnit.SECOND_OF_MINUTE; @@ -82,7 +82,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeLong(startTime); out.writeLong(endTime); 
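// The BoxplotAggregationBuilder hunk above shows the paired idiom for optional,
// version-gated fields; a compact sketch, kept in comments so the surrounding method
// is undisturbed (the names are the ones used in that hunk):
//
//   // writer: emit the optional value only when the stream is new enough
//   if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
//       out.writeOptionalWriteable(executionHint);
//   }
//
//   // reader: mirror the gate and fall back to a documented default otherwise
//   executionHint = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)
//       ? in.readOptionalWriteable(TDigestExecutionHint::readFrom)
//       : TDigestExecutionHint.HIGH_ACCURACY;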
out.writeDouble(resetCompensation); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) && rateUnit != null) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) && rateUnit != null) { out.writeByte(rateUnit.getId()); } else { out.writeByte(Rounding.DateTimeUnit.SECOND_OF_MINUTE.getId()); diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java index f528d99133756..6ec287fe2b980 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.apmdata; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; @@ -19,7 +21,6 @@ import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.yaml.YamlXContent; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.template.IndexTemplateRegistry; import org.elasticsearch.xpack.core.template.IngestPipelineConfig; @@ -37,12 +38,14 @@ * Creates all index templates and ingest pipelines that are required for using Elastic APM. */ public class APMIndexTemplateRegistry extends IndexTemplateRegistry { + private static final Logger logger = LogManager.getLogger(APMIndexTemplateRegistry.class); + private final int version; private final Map componentTemplates; private final Map composableIndexTemplates; private final List ingestPipelines; - private final boolean enabled; + private volatile boolean enabled; @SuppressWarnings("unchecked") public APMIndexTemplateRegistry( @@ -75,8 +78,6 @@ public APMIndexTemplateRegistry( Map.Entry> pipelineConfig = map.entrySet().iterator().next(); return loadIngestPipeline(pipelineConfig.getKey(), version, (List) pipelineConfig.getValue().get("dependencies")); }).collect(Collectors.toList()); - - enabled = XPackSettings.APM_DATA_ENABLED.get(nodeSettings); } catch (IOException e) { throw new RuntimeException(e); } @@ -86,6 +87,11 @@ public int getVersion() { return version; } + void setEnabled(boolean enabled) { + logger.info("APM index template registry is {}", enabled ? 
"enabled" : "disabled"); + this.enabled = enabled; + } + public boolean isEnabled() { return enabled; } diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java index 7acf3a3c972da..f905c17c04b4c 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java @@ -10,36 +10,62 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.core.XPackSettings; import java.util.Collection; +import java.util.Collections; import java.util.List; public class APMPlugin extends Plugin implements ActionPlugin { private static final Logger logger = LogManager.getLogger(APMPlugin.class); - private final SetOnce registry = new SetOnce<>(); + final SetOnce registry = new SetOnce<>(); + + private final boolean enabled; + + // APM_DATA_REGISTRY_ENABLED controls enabling the index template registry. + // + // This setting will be ignored if the plugin is disabled. + static final Setting APM_DATA_REGISTRY_ENABLED = Setting.boolSetting( + "xpack.apm_data.registry.enabled", + true, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public APMPlugin(Settings settings) { + this.enabled = XPackSettings.APM_DATA_ENABLED.get(settings); + } @Override public Collection createComponents(PluginServices services) { + logger.info("APM ingest plugin is {}", enabled ? "enabled" : "disabled"); + Settings settings = services.environment().settings(); + ClusterService clusterService = services.clusterService(); registry.set( - new APMIndexTemplateRegistry( - services.environment().settings(), - services.clusterService(), - services.threadPool(), - services.client(), - services.xContentRegistry() - ) + new APMIndexTemplateRegistry(settings, clusterService, services.threadPool(), services.client(), services.xContentRegistry()) ); - APMIndexTemplateRegistry registryInstance = registry.get(); - logger.info("APM ingest plugin is {}", registryInstance.isEnabled() ? 
"enabled" : "disabled"); - registryInstance.initialize(); - return List.of(registryInstance); + if (enabled) { + APMIndexTemplateRegistry registryInstance = registry.get(); + registryInstance.setEnabled(APM_DATA_REGISTRY_ENABLED.get(settings)); + clusterService.getClusterSettings().addSettingsUpdateConsumer(APM_DATA_REGISTRY_ENABLED, registryInstance::setEnabled); + registryInstance.initialize(); + } + return Collections.emptyList(); } @Override public void close() { registry.get().close(); } + + @Override + public List> getSettings() { + return List.of(APM_DATA_REGISTRY_ENABLED); + } } diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml index 0ebbb99a1e379..3d9c1490e5a86 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml @@ -11,12 +11,12 @@ composed_of: - apm@mappings - apm@settings - apm-10d@lifecycle -- apm@custom +- logs@custom - logs-apm.app@custom - ecs@mappings ignore_missing_component_templates: +- logs@custom - logs-apm.app@custom -- apm@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml index 831f7cc404415..4adcf125b2df9 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml @@ -13,12 +13,12 @@ composed_of: - apm@settings - apm-10d@lifecycle - logs-apm.error@mappings -- apm@custom +- logs@custom - logs-apm.error@custom - ecs@mappings ignore_missing_component_templates: +- logs@custom - logs-apm.error@custom -- apm@custom template: mappings: properties: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml index bdd1fa363bcf4..c2233469110f8 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml @@ -13,11 +13,11 @@ composed_of: - apm-90d@lifecycle - metrics-apm@mappings - metrics-apm@settings -- apm@custom +- metrics@custom - metrics-apm.app@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom +- metrics@custom - metrics-apm.app@custom template: settings: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml index 205784e22e685..3d6d05c58e780 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml @@ -14,11 +14,11 @@ composed_of: - apm-90d@lifecycle - metrics-apm@mappings - metrics-apm@settings -- apm@custom +- metrics@custom - metrics-apm.internal@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom +- metrics@custom - metrics-apm.internal@custom template: settings: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml 
b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml index 6279e044fbfcf..f234b60b1a6ec 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_destination@mappings -- apm@custom -- metrics-apm.service_destination@custom +- metrics@custom +- metrics-apm.service_destination.10m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_destination@custom +- metrics@custom +- metrics-apm.service_destination.10m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml index 10e4ca5b39a52..aa4f212532e56 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml @@ -15,12 +15,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_destination@mappings -- apm@custom -- metrics-apm.service_destination@custom +- metrics@custom +- metrics-apm.service_destination.1m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_destination@custom +- metrics@custom +- metrics-apm.service_destination.1m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml index dbac0d0d17d89..9b1a26486f482 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_destination@mappings -- apm@custom -- metrics-apm.service_destination@custom +- metrics@custom +- metrics-apm.service_destination.60m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_destination@custom +- metrics@custom +- metrics-apm.service_destination.60m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml index af99e419d4a56..c37ec93651d9d 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_summary@mappings -- apm@custom -- metrics-apm.service_summary@custom +- metrics@custom +- metrics-apm.service_summary.10m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_summary@custom +- metrics@custom +- 
metrics-apm.service_summary.10m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml index 29c28953d6b40..3a99bc8472c66 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml @@ -15,12 +15,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_summary@mappings -- apm@custom -- metrics-apm.service_summary@custom +- metrics@custom +- metrics-apm.service_summary.1m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_summary@custom +- metrics@custom +- metrics-apm.service_summary.1m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml index bdbd4900df3bb..d829967f7eddf 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_summary@mappings -- apm@custom -- metrics-apm.service_summary@custom +- metrics@custom +- metrics-apm.service_summary.60m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_summary@custom +- metrics@custom +- metrics-apm.service_summary.60m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml index 8b4e88391a475..bc21b35d4777f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_transaction@mappings -- apm@custom -- metrics-apm.service_transaction@custom +- metrics@custom +- metrics-apm.service_transaction.10m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_transaction@custom +- metrics@custom +- metrics-apm.service_transaction.10m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml index 811067f8e6f30..87a1e254baea7 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml @@ -15,12 +15,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_transaction@mappings -- apm@custom -- metrics-apm.service_transaction@custom +- metrics@custom +- metrics-apm.service_transaction.1m@custom - 
ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_transaction@custom +- metrics@custom +- metrics-apm.service_transaction.1m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml index db28b7c56aaab..b45ce0ec0fad7 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_transaction@mappings -- apm@custom -- metrics-apm.service_transaction@custom +- metrics@custom +- metrics-apm.service_transaction.60m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_transaction@custom +- metrics@custom +- metrics-apm.service_transaction.60m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml index 548f73656fda4..51d3c90cb4af8 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.transaction@mappings -- apm@custom -- metrics-apm.transaction@custom +- metrics@custom +- metrics-apm.transaction.10m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.transaction@custom +- metrics@custom +- metrics-apm.transaction.10m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml index 6206e7c126c48..8825a93db28dc 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml @@ -15,12 +15,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.transaction@mappings -- apm@custom -- metrics-apm.transaction@custom +- metrics@custom +- metrics-apm.transaction.1m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.transaction@custom +- metrics@custom +- metrics-apm.transaction.1m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml index 4ad00aecf23a5..e6657fbfe5d28 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.transaction@mappings -- apm@custom -- metrics-apm.transaction@custom +- metrics@custom +- metrics-apm.transaction.60m@custom 
- ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.transaction@custom +- metrics@custom +- metrics-apm.transaction.60m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml index 174faf432eb6e..174aec8c5515a 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml @@ -14,14 +14,12 @@ composed_of: - apm-90d@lifecycle - traces-apm@mappings - traces-apm.rum@mappings -- apm@custom -- traces-apm@custom +- traces@custom - traces-apm.rum@custom - ecs@mappings ignore_missing_component_templates: +- traces@custom - traces-apm.rum@custom -- traces-apm@custom -- apm@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml index 8c65c69bc3afa..a39d10897a2ed 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml @@ -11,10 +11,12 @@ composed_of: - traces@mappings - apm@mappings - apm@settings -- apm@custom +- traces@custom +- traces-apm.sampled@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom +- traces@custom +- traces-apm.sampled@custom template: lifecycle: data_retention: 1h diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml index fb6670a7f7143..de9c47dfd3f1b 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml @@ -13,12 +13,12 @@ composed_of: - apm@settings - apm-10d@lifecycle - traces-apm@mappings -- apm@custom +- traces@custom - traces-apm@custom - ecs@mappings ignore_missing_component_templates: +- traces@custom - traces-apm@custom -- apm@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/ingest-pipelines/apm@pipeline.yaml b/x-pack/plugin/apm-data/src/main/resources/ingest-pipelines/apm@pipeline.yaml index 40161e6ddcbf4..3c4dbfe99951f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/ingest-pipelines/apm@pipeline.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/ingest-pipelines/apm@pipeline.yaml @@ -14,7 +14,6 @@ processors: database_file: GeoLite2-City.mmdb field: client.ip target_field: client.geo - download_database_on_pipeline_creation: false ignore_missing: true on_failure: - remove: diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java index 7dcd6fdd807e4..4f6a5b58ff38d 100644 --- a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java +++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import 
org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.IngestMetadata; @@ -55,12 +56,15 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; +import java.util.stream.Stream; -import static org.elasticsearch.xpack.core.XPackSettings.APM_DATA_ENABLED; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.not; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -75,20 +79,28 @@ public class APMIndexTemplateRegistryTests extends ESTestCase { @Before public void createRegistryAndClient() { + final ClusterSettings clusterSettings = new ClusterSettings( + Settings.EMPTY, + Stream.concat(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), Set.of(APMPlugin.APM_DATA_REGISTRY_ENABLED).stream()) + .collect(Collectors.toSet()) + ); + threadPool = new TestThreadPool(this.getClass().getName()); client = new VerifyingClient(threadPool); - clusterService = ClusterServiceUtils.createClusterService(threadPool); + clusterService = ClusterServiceUtils.createClusterService(threadPool, clusterSettings); FeatureService featureService = new FeatureService(List.of()); stackTemplateRegistryAccessor = new StackTemplateRegistryAccessor( new StackTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY, featureService) ); + apmIndexTemplateRegistry = new APMIndexTemplateRegistry( - Settings.builder().put(APM_DATA_ENABLED.getKey(), true).build(), + Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY ); + apmIndexTemplateRegistry.setEnabled(true); } @After @@ -111,6 +123,28 @@ public void testThatMissingMasterNodeDoesNothing() { apmIndexTemplateRegistry.clusterChanged(event); } + public void testThatDisablingRegistryDoesNothing() throws Exception { + DiscoveryNode node = DiscoveryNodeUtils.create("node"); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + apmIndexTemplateRegistry.setEnabled(false); + assertThat(apmIndexTemplateRegistry.getComponentTemplateConfigs().entrySet(), hasSize(0)); + assertThat(apmIndexTemplateRegistry.getComposableTemplateConfigs().entrySet(), hasSize(0)); + assertThat(apmIndexTemplateRegistry.getIngestPipelines(), hasSize(0)); + + client.setVerifier((a, r, l) -> { + fail("if the registry is disabled nothing should happen"); + return null; + }); + ClusterChangedEvent event = createClusterChangedEvent(Map.of(), Map.of(), nodes); + apmIndexTemplateRegistry.clusterChanged(event); + + apmIndexTemplateRegistry.setEnabled(true); + assertThat(apmIndexTemplateRegistry.getComponentTemplateConfigs().entrySet(), not(hasSize(0))); + assertThat(apmIndexTemplateRegistry.getComposableTemplateConfigs().entrySet(), not(hasSize(0))); + assertThat(apmIndexTemplateRegistry.getIngestPipelines(), not(hasSize(0))); + } + public void testThatIndependentTemplatesAreAddedImmediatelyIfMissing() throws Exception { DiscoveryNode node = DiscoveryNodeUtils.create("node"); DiscoveryNodes nodes = 
DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); @@ -249,6 +283,48 @@ public void testIndexTemplates() throws Exception { assertThat(actualInstalledIngestPipelines.get(), equalTo(0)); } + public void testIndexTemplateConventions() throws Exception { + for (Map.Entry<String, ComposableIndexTemplate> entry : apmIndexTemplateRegistry.getComposableTemplateConfigs().entrySet()) { + final String name = entry.getKey(); + final int atIndex = name.lastIndexOf('@'); + assertThat(atIndex, not(equalTo(-1))); + assertThat(name.substring(atIndex + 1), equalTo("template")); + + final String dataStreamType = name.substring(0, name.indexOf('-')); + assertThat(dataStreamType, isIn(List.of("logs", "metrics", "traces"))); + + final ComposableIndexTemplate template = entry.getValue(); + assertThat(template.indexPatterns().size(), equalTo(1)); + + final String namePrefix = name.substring(0, atIndex); + switch (namePrefix) { + case "logs-apm.app", "metrics-apm.app": + // These two data streams have a service-specific dataset. + assertThat(template.indexPatterns().get(0), equalTo(namePrefix + ".*-*")); + break; + default: + assertThat(template.indexPatterns().get(0), equalTo(namePrefix + "-*")); + break; + } + + // Each index template should be composed of the following optional component templates: + // <data-stream-type>@custom + // <data-stream-type>-<data-stream-dataset>@custom + final List<String> optionalComponentTemplates = template.composedOf() + .stream() + .filter(t -> template.getIgnoreMissingComponentTemplates().contains(t)) + .toList(); + assertThat(optionalComponentTemplates, containsInAnyOrder(namePrefix + "@custom", dataStreamType + "@custom")); + + // There should be no required custom component templates. + final List<String> requiredCustomComponentTemplates = template.getRequiredComponentTemplates() + .stream() + .filter(t -> t.endsWith("@custom")) + .toList(); + assertThat(requiredCustomComponentTemplates, empty()); + } + } + private Map<String, ComponentTemplate> getIndependentComponentTemplateConfigs() { return apmIndexTemplateRegistry.getComponentTemplateConfigs().entrySet().stream().filter(template -> { Settings settings = template.getValue().template().settings(); diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMPluginTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMPluginTests.java new file mode 100644 index 0000000000000..289852737393e --- /dev/null +++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMPluginTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.apmdata; + +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.XPackSettings; +import org.junit.After; +import org.junit.Before; + +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class APMPluginTests extends ESTestCase { + private APMPlugin apmPlugin; + private ClusterService clusterService; + private ThreadPool threadPool; + + @Before + public void createPlugin() { + final ClusterSettings clusterSettings = new ClusterSettings( + Settings.EMPTY, + Stream.concat(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), Set.of(APMPlugin.APM_DATA_REGISTRY_ENABLED).stream()) + .collect(Collectors.toSet()) + ); + threadPool = new TestThreadPool(this.getClass().getName()); + clusterService = ClusterServiceUtils.createClusterService(threadPool, clusterSettings); + apmPlugin = new APMPlugin(Settings.builder().put(XPackSettings.APM_DATA_ENABLED.getKey(), true).build()); + } + + private void createComponents() { + Environment mockEnvironment = mock(Environment.class); + when(mockEnvironment.settings()).thenReturn(Settings.builder().build()); + Plugin.PluginServices services = mock(Plugin.PluginServices.class); + when(services.clusterService()).thenReturn(clusterService); + when(services.threadPool()).thenReturn(threadPool); + when(services.environment()).thenReturn(mockEnvironment); + apmPlugin.createComponents(services); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + apmPlugin.close(); + threadPool.shutdownNow(); + } + + public void testRegistryEnabledSetting() throws Exception { + createComponents(); + + // By default, the registry is enabled. + assertTrue(apmPlugin.registry.get().isEnabled()); + + // The registry can be disabled/enabled dynamically. + clusterService.getClusterSettings() + .applySettings(Settings.builder().put(APMPlugin.APM_DATA_REGISTRY_ENABLED.getKey(), false).build()); + assertFalse(apmPlugin.registry.get().isEnabled()); + } + + public void testDisablingPluginDisablesRegistry() throws Exception { + apmPlugin = new APMPlugin(Settings.builder().put(XPackSettings.APM_DATA_ENABLED.getKey(), false).build()); + createComponents(); + + // The plugin is disabled, so the registry is disabled too. + assertFalse(apmPlugin.registry.get().isEnabled()); + + // The registry can not be enabled dynamically when the plugin is disabled. 
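+ // (A sketch of how this is presumably wired, for context; APMPlugin#createComponents itself is not part of this hunk. With the plugin enabled, the dynamic flag would be registered roughly as: + // clusterService.getClusterSettings().addSettingsUpdateConsumer(APMPlugin.APM_DATA_REGISTRY_ENABLED, registry::setEnabled); + // with the plugin disabled, no consumer is registered, so the applySettings call below cannot re-enable the registry.)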
+ clusterService.getClusterSettings() + .applySettings(Settings.builder().put(APMPlugin.APM_DATA_REGISTRY_ENABLED.getKey(), true).build()); + assertFalse(apmPlugin.registry.get().isEnabled()); + } +} diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/30_custom_templates.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/30_custom_templates.yml new file mode 100644 index 0000000000000..62b36926d01dc --- /dev/null +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/30_custom_templates.yml @@ -0,0 +1,76 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + + - do: + cluster.put_component_template: + name: "metrics@custom" + body: + template: + mappings: + properties: + custom_field1: + type: keyword + meta: + source: metrics@custom + custom_field2: + type: keyword + meta: + source: metrics@custom + + - do: + cluster.put_component_template: + name: "metrics-apm.app@custom" + body: + template: + mappings: + properties: + custom_field2: + type: keyword + meta: + source: metrics-apm.app@custom + custom_field3: + type: keyword + meta: + source: metrics-apm.app@custom + +--- +"Test metrics @custom component templates": + - do: + indices.create_data_stream: + name: metrics-apm.app.svc1-testing + - do: + # Wait for cluster state changes to be applied before + # querying field mappings. + cluster.health: + wait_for_events: languid + - do: + indices.get_field_mapping: + index: metrics-apm.app.svc1-testing + fields: custom_field* + - set: {_arbitrary_key_: index} + - match: + $body.$index.mappings: + custom_field1: + full_name: custom_field1 + mapping: + custom_field1: + type: keyword + meta: + source: metrics@custom + custom_field2: + full_name: custom_field2 + mapping: + custom_field2: + type: keyword + meta: + source: metrics-apm.app@custom + custom_field3: + full_name: custom_field3 + mapping: + custom_field3: + type: keyword + meta: + source: metrics-apm.app@custom diff --git a/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java b/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java index 1819ad7960006..88ae09fbcdc99 100644 --- a/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java +++ b/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java @@ -192,7 +192,7 @@ private SearchHit[] getSearchHits(String asyncId, String user) throws IOExceptio ) ).getSearchResponse(); try { - return searchResponse.getHits().getHits(); + return searchResponse.getHits().asUnpooled().getHits(); } finally { searchResponse.decRef(); } diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index bb3dc5b866b54..3605d6365f867 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import 
org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; @@ -1318,7 +1317,7 @@ public void testCancelViaTasksAPI() throws Exception { SearchListenerPlugin.waitSearchStarted(); - ActionFuture<CancelTasksResponse> cancelFuture; + ActionFuture<ListTasksResponse> cancelFuture; try { ListTasksResponse listTasksResponse = client(LOCAL_CLUSTER).admin() .cluster() diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java index 481f5c79ba2ed..04b0b11ad38d4 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java @@ -13,7 +13,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.search.CCSSingleCoordinatorSearchProgressListener; import org.elasticsearch.action.search.SearchProgressActionListener; import org.elasticsearch.action.search.SearchRequest; @@ -155,7 +155,7 @@ public void cancelTask(Runnable runnable, String reason) { CancelTasksRequest req = new CancelTasksRequest().setTargetTaskId(searchId.getTaskId()).setReason(reason); client.admin().cluster().cancelTasks(req, new ActionListener<>() { @Override - public void onResponse(CancelTasksResponse cancelTasksResponse) { + public void onResponse(ListTasksResponse cancelTasksResponse) { runnable.run(); } @@ -499,6 +499,19 @@ public void onFinalReduce(List shards, TotalHits totalHits, Interna searchResponse.get().updatePartialResponse(shards.size(), totalHits, () -> aggregations, reducePhase); } + /** + * Indicates that a cluster has finished a search operation. Used for CCS minimize_roundtrips=true only. + * + * @param clusterAlias alias of cluster that has finished a search operation and returned a SearchResponse. + * The cluster alias for the local cluster is RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.
+ * @param clusterResponse SearchResponse from cluster 'clusterAlias' + */ + @Override + public void onClusterResponseMinimizeRoundtrips(String clusterAlias, SearchResponse clusterResponse) { + // no need to call the delegate progress listener, since this method is only called for minimize_roundtrips=true + searchResponse.get().updateResponseMinimizeRoundtrips(clusterAlias, clusterResponse); + } + @Override public void onResponse(SearchResponse response) { searchResponse.get().updateFinalResponse(response, ccsMinimizeRoundtrips); diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java index e50a4ce1ed94f..de360fd1c1bd4 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java @@ -11,6 +11,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponse.Clusters; +import org.elasticsearch.action.search.SearchResponseMerger; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -19,6 +20,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xpack.core.search.action.AsyncSearchResponse; import org.elasticsearch.xpack.core.search.action.AsyncStatusResponse; @@ -61,7 +63,20 @@ class MutableSearchResponse implements Releasable { private SearchResponse finalResponse; private ElasticsearchException failure; private Map<String, List<String>> responseHeaders; - + /** + * Set to true when the local cluster has completed (its full SearchResponse + * has been received). Only used for CCS minimize_roundtrips=true. + */ + private boolean localClusterComplete; + /** + * For CCS minimize_roundtrips=true, we collect SearchResponses from each cluster in + * order to provide partial results before all clusters have reported back results. + */ + private List<SearchResponse> clusterResponses; + /** + * Set to true when the final SearchResponse has been received + * or a fatal error has occurred. + */ private boolean frozen; /** @@ -81,11 +96,16 @@ class MutableSearchResponse implements Releasable { this.isPartial = true; this.threadContext = threadContext; this.totalHits = EMPTY_TOTAL_HITS; + this.localClusterComplete = false; } /** * Updates the response with the result of a partial reduction. + * + * @param successfulShards the number of successful shards so far + * @param totalHits the total hits so far * @param reducedAggs is a strategy for producing the reduced aggs + * @param reducePhase the reduce phase count */ @SuppressWarnings("HiddenField") synchronized void updatePartialResponse( @@ -128,6 +148,24 @@ assert shardsInResponseMatchExpected(response, ccsMinimizeRoundtrips) this.frozen = true; } + /** + * Indicates that a cluster has finished a search operation. Used for CCS minimize_roundtrips=true only. + * + * @param clusterAlias alias of cluster that has finished a search operation and returned a SearchResponse. + * The cluster alias for the local cluster is RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.
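+ * + * <p>Responses stored here are ref-counted; both sides of the contract are visible in this diff: the method + * below calls {@code mustIncRef()} on each response added to {@code clusterResponses}, and {@code close()} + * calls {@code decRef()} on each stored response, so partial merges can read them safely until the async + * search is released.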
+ * @param clusterResponse SearchResponse from cluster 'clusterAlias' + */ + synchronized void updateResponseMinimizeRoundtrips(String clusterAlias, SearchResponse clusterResponse) { + if (clusterResponses == null) { + clusterResponses = new ArrayList<>(); + } + clusterResponses.add(clusterResponse); + clusterResponse.mustIncRef(); + if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) { + localClusterComplete = true; + } + } + private boolean isPartialResponse(SearchResponse response) { if (response.getClusters() == null) { return true; @@ -190,6 +228,7 @@ synchronized AsyncSearchResponse toAsyncSearchResponse(AsyncSearchTask task, lon if (restoreResponseHeaders && responseHeaders != null) { restoreResponseHeadersContext(threadContext, responseHeaders); } + SearchResponse searchResponse; if (finalResponse != null) { // We have a final response, use it. @@ -199,16 +238,43 @@ synchronized AsyncSearchResponse toAsyncSearchResponse(AsyncSearchTask task, lon // An error occurred before we got the shard list searchResponse = null; } else { - /* - * Build the response, reducing aggs if we haven't already and - * storing the result of the reduction, so we won't have to reduce - * the same aggregation results a second time if nothing has changed. - * This does cost memory because we have a reference to the finally - * reduced aggs sitting around which can't be GCed until we get an update. - */ - InternalAggregations reducedAggs = reducedAggsSource.get(); - reducedAggsSource = () -> reducedAggs; - searchResponse = buildResponse(task.getStartTimeNanos(), reducedAggs); + // partial results branch + SearchResponseMerger searchResponseMerger = createSearchResponseMerger(task); + try { + if (searchResponseMerger == null) { // local-only search or CCS MRT=false + /* + * Build the response, reducing aggs if we haven't already and + * storing the result of the reduction, so we won't have to reduce + * the same aggregation results a second time if nothing has changed. + * This does cost memory because we have a reference to the finally + * reduced aggs sitting around which can't be GCed until we get an update. + */ + InternalAggregations reducedAggs = reducedAggsSource.get(); + reducedAggsSource = () -> reducedAggs; + searchResponse = buildResponse(task.getStartTimeNanos(), reducedAggs); + } else if (localClusterComplete == false) { + /* + * For CCS MRT=true where the local cluster has reported back only partial results + * (a subset of shards), use SearchResponseMerger to merge any full results that + * have come in from remote clusters with the partial results of the local cluster + */ + InternalAggregations reducedAggs = reducedAggsSource.get(); + reducedAggsSource = () -> reducedAggs; + SearchResponse partialAggsSearchResponse = buildResponse(task.getStartTimeNanos(), reducedAggs); + try { + searchResponse = getMergedResponse(searchResponseMerger, partialAggsSearchResponse); + } finally { + partialAggsSearchResponse.decRef(); + } + } else { + // For CCS MRT=true when the local cluster has reported back full results (via updateResponseMinimizeRoundtrips) + searchResponse = getMergedResponse(searchResponseMerger); + } + } finally { + if (searchResponseMerger != null) { + searchResponseMerger.close(); + } + } } try { return new AsyncSearchResponse( @@ -227,6 +293,41 @@ synchronized AsyncSearchResponse toAsyncSearchResponse(AsyncSearchTask task, lon } } + /** + * Creates a SearchResponseMerger from the Supplier of {@link SearchResponseMerger} held by the AsyncSearchTask.
+ * The supplier will be null for local-only searches and CCS minimize_roundtrips=false. In those cases, + * this method returns null. + * + * Otherwise, it creates a new SearchResponseMerger and populates it with all the SearchResponses + * received so far (via the updateResponseMinimizeRoundtrips method). + * + * @param task holds the Supplier of SearchResponseMerger + * @return SearchResponseMerger with all responses collected so far or null + * (for local-only/CCS minimize_roundtrips=false) + */ + private SearchResponseMerger createSearchResponseMerger(AsyncSearchTask task) { + if (task.getSearchResponseMergerSupplier() == null) { + return null; // local search and CCS minimize_roundtrips=false + } + return task.getSearchResponseMergerSupplier().get(); + } + + private SearchResponse getMergedResponse(SearchResponseMerger merger) { + return getMergedResponse(merger, null); + } + + private SearchResponse getMergedResponse(SearchResponseMerger merger, SearchResponse localPartialAggsOnly) { + if (clusterResponses != null) { + for (SearchResponse response : clusterResponses) { + merger.add(response); + } + } + if (localPartialAggsOnly != null) { + merger.add(localPartialAggsOnly); + } + return merger.getMergedResponse(clusters); + } + /** * Creates an {@link AsyncStatusResponse} -- status of an async response. * Response is created based on the current state of the mutable response or based on {@code finalResponse} if it is available. @@ -373,9 +474,14 @@ private String getShardsInResponseMismatchInfo(SearchResponse response, boolean } @Override - public void close() { + public synchronized void close() { if (finalResponse != null) { finalResponse.decRef(); } + if (clusterResponses != null) { + for (SearchResponse clusterResponse : clusterResponses) { + clusterResponse.decRef(); + } + } } } diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java index 5f724509ec98a..2406fc6b4e92a 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -337,7 +338,8 @@ public void testScaleWhileShrinking() throws Exception { assertBusy(() -> { refreshClusterInfo(); final ClusterInfo clusterInfo = getClusterInfo(); - final long freeBytes = clusterInfo.getNodeMostAvailableDiskUsages().get(dataNode2Id).getFreeBytes(); + DiskUsage usage = clusterInfo.getNodeMostAvailableDiskUsages().get(dataNode2Id); + final long freeBytes = usage.freeBytes(); assertThat(freeBytes, is(equalTo(enoughSpaceForColocation))); }); diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java index 672ddad9ea189..d88fa19b18f49 100644
--- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java @@ -300,7 +300,7 @@ private boolean nodeHasAccurateCapacity(DiscoveryNode node) { DiskUsage mostAvailable = clusterInfo.getNodeMostAvailableDiskUsages().get(node.getId()); DiskUsage leastAvailable = clusterInfo.getNodeLeastAvailableDiskUsages().get(node.getId()); if (mostAvailable == null - || mostAvailable.getPath().equals(leastAvailable.getPath()) == false + || mostAvailable.path().equals(leastAvailable.path()) == false || totalStorage(clusterInfo.getNodeMostAvailableDiskUsages(), node) < 0) { return false; } @@ -340,7 +340,7 @@ private AutoscalingCapacity.AutoscalingResources resourcesFor(DiscoveryNode node private static long totalStorage(Map<String, DiskUsage> diskUsages, DiscoveryNode node) { DiskUsage diskUsage = diskUsages.get(node.getId()); - return diskUsage != null ? diskUsage.getTotalBytes() : -1; + return diskUsage != null ? diskUsage.totalBytes() : -1; } private boolean rolesFilter(DiscoveryNode discoveryNode) { diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java index 7eb3cca18efd0..ffa3a7308da90 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java @@ -681,9 +681,8 @@ long unmovableSize(String nodeId, Collection shards) { return 0; } - long threshold = diskThresholdSettings.getFreeBytesThresholdHighStage(ByteSizeValue.ofBytes(diskUsage.getTotalBytes())) - .getBytes(); - long missing = threshold - diskUsage.getFreeBytes(); + long threshold = diskThresholdSettings.getFreeBytesThresholdHighStage(ByteSizeValue.ofBytes(diskUsage.totalBytes())).getBytes(); + long missing = threshold - diskUsage.freeBytes(); return Math.max(missing, shards.stream().mapToLong(this::sizeOf).min().orElseThrow()); } @@ -980,7 +979,7 @@ public static class ReactiveReason implements AutoscalingDeciderResult.Reason { static final int MAX_AMOUNT_OF_SHARDS = 512; private static final TransportVersion SHARD_IDS_OUTPUT_VERSION = TransportVersions.V_8_4_0; - private static final TransportVersion UNASSIGNED_NODE_DECISIONS_OUTPUT_VERSION = TransportVersions.V_8_500_020; + private static final TransportVersion UNASSIGNED_NODE_DECISIONS_OUTPUT_VERSION = TransportVersions.V_8_9_X; private final String reason; private final long unassigned; diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityServiceTests.java index bbd8e7ddc5a53..4061d37832184 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityServiceTests.java @@ -248,7 +248,7 @@ public void testContext() { DiskUsage diskUsage = new DiskUsage(nodeId, null, randomAlphaOfLength(5), total, randomLongBetween(0, total));
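// (The getPath()/getFreeBytes() -> path()/freeBytes() renames throughout this PR match Java record accessor naming; a minimal sketch of the shape this test relies on, assuming DiskUsage is now a record: // record DiskUsage(String nodeId, String nodeName, String path, long totalBytes, long freeBytes) {} // each component is then read through its unprefixed accessor, e.g. diskUsage.freeBytes() below.)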
leastUsages.put(nodeId, diskUsage); if (randomBoolean()) { - diskUsage = new DiskUsage(nodeId, null, diskUsage.getPath(), total, diskUsage.getFreeBytes()); + diskUsage = new DiskUsage(nodeId, null, diskUsage.path(), total, diskUsage.freeBytes()); } mostUsages.put(nodeId, diskUsage); sumTotal += total; @@ -305,9 +305,9 @@ public void testContext() { new DiskUsage( multiPathNodeId, null, - randomValueOtherThan(original.getPath(), () -> randomAlphaOfLength(5)), - original.getTotalBytes(), - original.getFreeBytes() + randomValueOtherThan(original.path(), () -> randomAlphaOfLength(5)), + original.totalBytes(), + original.freeBytes() ) ); diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ByteRange.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ByteRange.java index f58f61c987143..7395a3203b315 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ByteRange.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ByteRange.java @@ -23,7 +23,7 @@ private ByteRange(long start, long end) { this.start = start; this.end = end; assert start >= 0L : "Start must be >= 0 but saw [" + start + "]"; - assert end >= start : "End must be greater or equal to start but saw [" + start + "][" + start + "]"; + assert end >= start : "End must be greater or equal to start but saw [" + end + "][" + start + "]"; } public long start() { diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 5e8933f86ae7d..01847a3205870 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.blobcache.BlobCacheMetrics; @@ -59,6 +60,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.LongAdder; import java.util.function.IntConsumer; +import java.util.function.LongSupplier; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -253,6 +255,13 @@ public void validate(ByteSizeValue value, Map, Object> settings, bool Setting.Property.NodeScope ); + // used in tests + void computeDecay() { + if (cache instanceof LFUCache lfuCache) { + lfuCache.computeDecay(); + } + } + private interface Cache extends Releasable { CacheEntry get(K cacheKey, long fileLength, int region); @@ -302,6 +311,8 @@ private CacheEntry(T chunk) { private final BlobCacheMetrics blobCacheMetrics; + private final LongSupplier relativeTimeInMillisSupplier; + public SharedBlobCacheService( NodeEnvironment environment, Settings settings, @@ -309,7 +320,7 @@ public SharedBlobCacheService( String ioExecutor, BlobCacheMetrics blobCacheMetrics ) { - this(environment, settings, threadPool, ioExecutor, ioExecutor, blobCacheMetrics); + this(environment, settings, threadPool, ioExecutor, ioExecutor, blobCacheMetrics, threadPool::relativeTimeInMillis); } public SharedBlobCacheService( @@ -318,7 +329,8 @@ public 
SharedBlobCacheService( ThreadPool threadPool, String ioExecutor, String bulkExecutor, - BlobCacheMetrics blobCacheMetrics + BlobCacheMetrics blobCacheMetrics, + LongSupplier relativeTimeInMillisSupplier ) { this.threadPool = threadPool; this.ioExecutor = threadPool.executor(ioExecutor); @@ -360,6 +372,7 @@ public SharedBlobCacheService( this.recoveryRangeSize = BlobCacheUtils.toIntBytes(SHARED_CACHE_RECOVERY_RANGE_SIZE_SETTING.get(settings).getBytes()); this.blobCacheMetrics = blobCacheMetrics; + this.relativeTimeInMillisSupplier = relativeTimeInMillisSupplier; } public static long calculateCacheSize(Settings settings, long totalFsSize) { @@ -428,6 +441,10 @@ private int getRegionSize(long fileLength, int region) { return effectiveRegionSize; } + public int getRegionSize() { + return regionSize; + } + CacheFileRegion get(KeyType cacheKey, long fileLength, int region) { return cache.get(cacheKey, fileLength, region).chunk; } @@ -495,6 +512,61 @@ public boolean maybeFetchFullEntry(KeyType cacheKey, long length, RangeMissingHa return true; } + /** + * Fetches a region of a blob and writes it to the cache if there are enough free pages in the cache to do so. + * + * This method returns as soon as the download tasks are instantiated, but the tasks themselves + * are run on the bulk executor. + * + * If an exception is thrown from the writer then the cache entry being downloaded is freed + * and unlinked. + * + * @param cacheKey the key to fetch data for + * @param region the region of the blob to fetch + * @param blobLength the length of the blob from which the region is fetched (used to compute the size of the ending region) + * @param writer a writer that handles writing of newly downloaded data to the shared cache + * @param listener a listener that is completed with {@code true} if the current thread triggered the fetching of the region, in which + * case the data is available in cache. The listener is completed with {@code false} in every other case: if the + * region to write is already available in cache, if the region is pending fetching via another thread or if there are + * not enough free pages to fetch the region.
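+ * + * The ending region of a blob may be shorter than the shared region size. For example (sizes assumed for + * illustration; the real value comes from the region size setting), with 16MB regions a 40MB blob spans + * regions of 16MB, 16MB and 8MB, the last one computed as blobLength - getRegionStart(region) below.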
+ */ + public void maybeFetchRegion( + final KeyType cacheKey, + final int region, + final long blobLength, + final RangeMissingHandler writer, + final ActionListener<Boolean> listener + ) { + if (freeRegionCount() < 1 && maybeEvictLeastUsed() == false) { + // no free page available and no old enough unused region to be evicted + listener.onResponse(false); + return; + } + long regionLength = regionSize; + try { + if (region == getEndingRegion(blobLength)) { + regionLength = blobLength - getRegionStart(region); + } + ByteRange regionRange = ByteRange.of(0, regionLength); + if (regionRange.isEmpty()) { + listener.onResponse(false); + return; + } + final CacheFileRegion entry = get(cacheKey, blobLength, region); + entry.populate(regionRange, writer, bulkIOExecutor, listener); + } catch (Exception e) { + listener.onFailure(e); + } + } + + // used by tests + boolean maybeEvictLeastUsed() { + if (cache instanceof LFUCache lfuCache) { + return lfuCache.maybeEvictLeastUsed(); + } + return false; + } + private static void throwAlreadyClosed(String message) { throw new AlreadyClosedException(message); } @@ -540,6 +612,10 @@ int getFreq(CacheFileRegion cacheFileRegion) { return -1; } + private long relativeTimeInMillis() { + return relativeTimeInMillisSupplier.getAsLong(); + } + @Override public void close() { sharedBytes.decRef(); @@ -666,6 +742,50 @@ boolean tryRead(ByteBuffer buf, long offset) throws IOException { return true; } + /** + * Populates a range in the cache if the range is neither available nor pending to be available in cache. + * + * @param rangeToWrite the range of bytes to populate + * @param writer a writer that handles writing of newly downloaded data to the shared cache + * @param executor the executor used to download and to write new data + * @param listener a listener that is completed with {@code true} if the current thread triggered the download and write of the + * range, in which case the listener is completed once writing is done. The listener is completed with {@code false} + * if the range to write is already available in cache or if another thread will download and write the range, in + * which cases the listener is completed immediately. + */ + void populate( + final ByteRange rangeToWrite, + final RangeMissingHandler writer, + final Executor executor, + final ActionListener<Boolean> listener + ) { + Releasable resource = null; + try { + incRef(); + resource = Releasables.releaseOnce(this::decRef); + ensureOpen(); + final List<SparseFileTracker.Gap> gaps = tracker.waitForRange( + rangeToWrite, + rangeToWrite, + Assertions.ENABLED ?
ActionListener.releaseAfter(ActionListener.running(() -> { + assert regionOwners.get(io) == this; + }), resource) : ActionListener.releasing(resource) + ); + final var hasGapsToFill = gaps.size() > 0; + try (RefCountingListener refs = new RefCountingListener(listener.map(unused -> hasGapsToFill))) { + if (hasGapsToFill) { + final var cacheFileRegion = CacheFileRegion.this; + for (SparseFileTracker.Gap gap : gaps) { + var fillGapRunnable = fillGapRunnable(cacheFileRegion, writer, gap); + executor.execute(ActionRunnable.run(refs.acquire(), fillGapRunnable::run)); + } + } + } + } catch (Exception e) { + releaseAndFail(listener, resource, e); + } + } + void populateAndRead( final ByteRange rangeToWrite, final ByteRange rangeToRead, @@ -701,51 +821,50 @@ void populateAndRead( ); if (gaps.isEmpty() == false) { - fillGaps(executor, writer, gaps); + final var cacheFileRegion = CacheFileRegion.this; + for (SparseFileTracker.Gap gap : gaps) { + executor.execute(fillGapRunnable(cacheFileRegion, writer, gap)); + } } } catch (Exception e) { releaseAndFail(listener, resource, e); } } - private void fillGaps(Executor executor, RangeMissingHandler writer, List<SparseFileTracker.Gap> gaps) { - final var cacheFileRegion = CacheFileRegion.this; - for (SparseFileTracker.Gap gap : gaps) { - executor.execute(new AbstractRunnable() { - - @Override - protected void doRun() throws Exception { - ensureOpen(); - if (cacheFileRegion.tryIncRef() == false) { - throw new AlreadyClosedException("File chunk [" + cacheFileRegion.regionKey + "] has been released"); - } - try { - final int start = Math.toIntExact(gap.start()); - var ioRef = io; - assert regionOwners.get(ioRef) == cacheFileRegion; - writer.fillCacheRange( - ioRef, - start, - start, - Math.toIntExact(gap.end() - start), - progress -> gap.onProgress(start + progress) - ); - writeCount.increment(); - } finally { - cacheFileRegion.decRef(); - } - gap.onCompletion(); + private AbstractRunnable fillGapRunnable(CacheFileRegion cacheFileRegion, RangeMissingHandler writer, SparseFileTracker.Gap gap) { + return new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + ensureOpen(); + if (cacheFileRegion.tryIncRef() == false) { + throw new AlreadyClosedException("File chunk [" + cacheFileRegion.regionKey + "] has been released"); } - - @Override - public void onFailure(Exception e) { - gap.onFailure(e); + try { + final int start = Math.toIntExact(gap.start()); + var ioRef = io; + assert regionOwners.get(ioRef) == cacheFileRegion; + writer.fillCacheRange( + ioRef, + start, + start, + Math.toIntExact(gap.end() - start), + progress -> gap.onProgress(start + progress) + ); + writeCount.increment(); + } finally { + cacheFileRegion.decRef(); } - }); - } + gap.onCompletion(); + } + + @Override + public void onFailure(Exception e) { + gap.onFailure(e); + } + }; } - private static void releaseAndFail(ActionListener<Integer> listener, Releasable decrementRef, Exception e) { + private static void releaseAndFail(ActionListener<?> listener, Releasable decrementRef, Exception e) { try { Releasables.close(decrementRef); } catch (Exception ex) { @@ -997,10 +1116,11 @@ class LFUCacheEntry extends CacheEntry { LFUCacheEntry(CacheFileRegion chunk, long lastAccessed) { super(chunk); this.lastAccessed = lastAccessed; + this.freq = 1; } void touch() { - long now = threadPool.relativeTimeInMillis(); + long now = relativeTimeInMillis(); if (now - lastAccessed >= minTimeDelta) { maybePromote(now, this); } @@ -1034,7 +1154,7 @@ int getFreq(CacheFileRegion cacheFileRegion) { @Override public LFUCacheEntry
get(KeyType cacheKey, long fileLength, int region) { final RegionKey regionKey = new RegionKey<>(cacheKey, region); - final long now = threadPool.relativeTimeInMillis(); + final long now = relativeTimeInMillis(); // try to just get from the map on the fast-path to save instantiating the capturing lambda needed on the slow path // if we did not find an entry var entry = keyMapping.get(regionKey); @@ -1097,7 +1217,7 @@ private LFUCacheEntry initChunk(LFUCacheEntry entry) { throwAlreadyClosed("no free region found (contender)"); } // new item - assert entry.freq == 0; + assert entry.freq == 1; assert entry.prev == null; assert entry.next == null; final SharedBytes.IO freeSlot = freeRegions.poll(); @@ -1264,9 +1384,29 @@ private int maybeEvict() { return -1; } + /** + * This method tries to evict the least used {@link LFUCacheEntry}. Only entries with the lowest possible frequency are considered + * for eviction. + * + * @return true if an entry was evicted, false otherwise. + */ + public boolean maybeEvictLeastUsed() { + synchronized (SharedBlobCacheService.this) { + for (LFUCacheEntry entry = freqs[0]; entry != null; entry = entry.next) { + boolean evicted = entry.chunk.tryEvict(); + if (evicted && entry.chunk.io != null) { + unlink(entry); + keyMapping.remove(entry.chunk.regionKey, entry); + return true; + } + } + } + return false; + } + private void computeDecay() { synchronized (SharedBlobCacheService.this) { - long now = threadPool.relativeTimeInMillis(); + long now = relativeTimeInMillis(); for (int i = 0; i < maxFreq; i++) { for (LFUCacheEntry entry = freqs[i]; entry != null; entry = entry.next) { if (entry.freq > 0 && now - entry.lastAccessed >= 2 * minTimeDelta) { diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index d861ff193112d..fa58ab58ac95c 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -9,6 +9,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.blobcache.BlobCacheMetrics; import org.elasticsearch.blobcache.common.ByteRange; @@ -32,6 +33,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Objects; @@ -260,29 +262,35 @@ public void testDecay() throws IOException { final var region1 = cacheService.get(cacheKey2, size(250), 1); assertEquals(3, cacheService.freeRegionCount()); - assertEquals(0, cacheService.getFreq(region0)); - assertEquals(0, cacheService.getFreq(region1)); + assertEquals(1, cacheService.getFreq(region0)); + assertEquals(1, cacheService.getFreq(region1)); taskQueue.advanceTime(); taskQueue.runAllRunnableTasks(); final var region0Again = cacheService.get(cacheKey1, size(250), 0); assertSame(region0Again, region0); - assertEquals(1, cacheService.getFreq(region0)); - assertEquals(0, cacheService.getFreq(region1)); + assertEquals(2, cacheService.getFreq(region0)); + assertEquals(1, cacheService.getFreq(region1)); taskQueue.advanceTime(); taskQueue.runAllRunnableTasks(); 
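// (Frequency arithmetic exercised by the assertions below: a new entry now starts at freq = 1, a touch() at least minTimeDelta after the last access promotes it by one, and each decay run demotes entries idle for >= 2 * minTimeDelta by one; so region0 climbs 1 -> 2 -> 3 on re-access and then decays 3 -> 2 -> 1 over the idle ticks.)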
cacheService.get(cacheKey1, size(250), 0); - assertEquals(2, cacheService.getFreq(region0)); + assertEquals(3, cacheService.getFreq(region0)); cacheService.get(cacheKey1, size(250), 0); - assertEquals(2, cacheService.getFreq(region0)); + assertEquals(3, cacheService.getFreq(region0)); // advance 2 ticks (decay only starts after 2 ticks) taskQueue.advanceTime(); taskQueue.runAllRunnableTasks(); taskQueue.advanceTime(); taskQueue.runAllRunnableTasks(); + assertEquals(2, cacheService.getFreq(region0)); + assertEquals(0, cacheService.getFreq(region1)); + + // advance another tick + taskQueue.advanceTime(); + taskQueue.runAllRunnableTasks(); assertEquals(1, cacheService.getFreq(region0)); assertEquals(0, cacheService.getFreq(region1)); @@ -407,7 +415,8 @@ public void execute(Runnable command) { threadPool, ThreadPool.Names.GENERIC, "bulk", - BlobCacheMetrics.NOOP + BlobCacheMetrics.NOOP, + threadPool::relativeTimeInMillis ) ) { { @@ -468,7 +477,8 @@ public ExecutorService executor(String name) { threadPool, ThreadPool.Names.GENERIC, "bulk", - BlobCacheMetrics.NOOP + BlobCacheMetrics.NOOP, + threadPool::relativeTimeInMillis ) ) { @@ -697,6 +707,291 @@ public void testCacheSizeChanges() throws IOException { } } + public void testMaybeEvictLeastUsed() throws Exception { + final int numRegions = 3; + final long regionSize = size(1L); + Settings settings = Settings.builder() + .put(NODE_NAME_SETTING.getKey(), "node") + .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(numRegions)).getStringRep()) + .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep()) + .put("path.home", createTempDir()) + .build(); + + final AtomicLong relativeTimeInMillis = new AtomicLong(0L); + final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue(); + try ( + NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); + var cacheService = new SharedBlobCacheService<>( + environment, + settings, + taskQueue.getThreadPool(), + ThreadPool.Names.GENERIC, + "bulk", + BlobCacheMetrics.NOOP, + relativeTimeInMillis::get + ) + ) { + final Set<Object> cacheKeys = new HashSet<>(); + + assertThat("All regions are free", cacheService.freeRegionCount(), equalTo(numRegions)); + assertThat("Cache has no entries", cacheService.maybeEvictLeastUsed(), is(false)); + + // use all regions in cache + for (int i = 0; i < numRegions; i++) { + final var cacheKey = generateCacheKey(); + var entry = cacheService.get(cacheKey, regionSize, 0); + entry.populate( + ByteRange.of(0L, regionSize), + (channel, channelPos, relativePos, length, progressUpdater) -> progressUpdater.accept(length), + taskQueue.getThreadPool().generic(), + ActionListener.noop() + ); + assertThat(cacheService.getFreq(entry), equalTo(1)); + relativeTimeInMillis.incrementAndGet(); + cacheKeys.add(cacheKey); + } + + assertThat("All regions are used", cacheService.freeRegionCount(), equalTo(0)); + assertThat("Cache entries are not old enough to be evicted", cacheService.maybeEvictLeastUsed(), is(false)); + + taskQueue.runAllRunnableTasks(); + + assertThat("All regions are used", cacheService.freeRegionCount(), equalTo(0)); + assertThat("Cache entries are not old enough to be evicted", cacheService.maybeEvictLeastUsed(), is(false)); + + // simulate elapsed time + var minInternalMillis = SharedBlobCacheService.SHARED_CACHE_MIN_TIME_DELTA_SETTING.getDefault(Settings.EMPTY).millis(); +
relativeTimeInMillis.addAndGet(minInternalMillis); + + // touch some random cache entries + var unusedCacheKeys = Set.copyOf(randomSubsetOf(cacheKeys)); + cacheKeys.forEach(key -> { + if (unusedCacheKeys.contains(key) == false) { + var entry = cacheService.get(key, regionSize, 0); + assertThat(cacheService.getFreq(entry), equalTo(2)); + } + }); + + assertThat("All regions are used", cacheService.freeRegionCount(), equalTo(0)); + assertThat("Cache entries are not old enough to be evicted", cacheService.maybeEvictLeastUsed(), is(false)); + + for (int i = 1; i <= unusedCacheKeys.size(); i++) { + // need to advance time and compute decay to decrease frequencies in cache and have an evictable entry + relativeTimeInMillis.addAndGet(minInternalMillis); + cacheService.computeDecay(); + + assertThat("Cache entry is old enough to be evicted", cacheService.maybeEvictLeastUsed(), is(true)); + assertThat(cacheService.freeRegionCount(), equalTo(i)); + } + + assertThat("No more cache entries old enough to be evicted", cacheService.maybeEvictLeastUsed(), is(false)); + assertThat(cacheService.freeRegionCount(), equalTo(unusedCacheKeys.size())); + } + } + + public void testMaybeFetchRegion() throws Exception { + final long cacheSize = size(500L); + final long regionSize = size(100L); + Settings settings = Settings.builder() + .put(NODE_NAME_SETTING.getKey(), "node") + .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(cacheSize).getStringRep()) + .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep()) + .put("path.home", createTempDir()) + .build(); + + AtomicInteger bulkTaskCount = new AtomicInteger(0); + ThreadPool threadPool = new TestThreadPool("test") { + @Override + public ExecutorService executor(String name) { + ExecutorService generic = super.executor(Names.GENERIC); + if (Objects.equals(name, "bulk")) { + return new StoppableExecutorServiceWrapper(generic) { + @Override + public void execute(Runnable command) { + super.execute(command); + bulkTaskCount.incrementAndGet(); + } + }; + } + return generic; + } + }; + final AtomicLong relativeTimeInMillis = new AtomicLong(0L); + try ( + NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); + var cacheService = new SharedBlobCacheService<>( + environment, + settings, + threadPool, + ThreadPool.Names.GENERIC, + "bulk", + BlobCacheMetrics.NOOP, + relativeTimeInMillis::get + ) + ) { + { + // fetch a single region + final var cacheKey = generateCacheKey(); + assertEquals(5, cacheService.freeRegionCount()); + final long blobLength = size(250); // 3 regions + AtomicLong bytesRead = new AtomicLong(0L); + final PlainActionFuture<Boolean> future = new PlainActionFuture<>(); + cacheService.maybeFetchRegion(cacheKey, 0, blobLength, (channel, channelPos, relativePos, length, progressUpdater) -> { + bytesRead.addAndGet(length); + progressUpdater.accept(length); + }, future); + + var fetched = future.get(10, TimeUnit.SECONDS); + assertThat("Region has been fetched", fetched, is(true)); + assertEquals(regionSize, bytesRead.get()); + assertEquals(4, cacheService.freeRegionCount()); + assertEquals(1, bulkTaskCount.get()); + } + { + // fetch multiple regions to use all the cache + final int remainingFreeRegions = cacheService.freeRegionCount(); + assertEquals(4, cacheService.freeRegionCount()); + + final var cacheKey = generateCacheKey(); + final long blobLength = regionSize * remainingFreeRegions; + AtomicLong bytesRead = new
AtomicLong(0L); + + final PlainActionFuture<Collection<Boolean>> future = new PlainActionFuture<>(); + final var listener = new GroupedActionListener<>(remainingFreeRegions, future); + for (int region = 0; region < remainingFreeRegions; region++) { + relativeTimeInMillis.addAndGet(1_000L); + cacheService.maybeFetchRegion( + cacheKey, + region, + blobLength, + (channel, channelPos, relativePos, length, progressUpdater) -> { + bytesRead.addAndGet(length); + progressUpdater.accept(length); + }, + listener + ); + } + + var results = future.get(10, TimeUnit.SECONDS); + assertThat(results.stream().allMatch(result -> result), is(true)); + assertEquals(blobLength, bytesRead.get()); + assertEquals(0, cacheService.freeRegionCount()); + assertEquals(1 + remainingFreeRegions, bulkTaskCount.get()); + } + { + // cache fully used, no entry old enough to be evicted + assertEquals(0, cacheService.freeRegionCount()); + final var cacheKey = generateCacheKey(); + final PlainActionFuture<Boolean> future = new PlainActionFuture<>(); + cacheService.maybeFetchRegion( + cacheKey, + randomIntBetween(0, 10), + randomLongBetween(1L, regionSize), + (channel, channelPos, relativePos, length, progressUpdater) -> { + throw new AssertionError("should not be executed"); + }, + future + ); + assertThat("Listener is immediately completed", future.isDone(), is(true)); + assertThat("Region already exists in cache", future.get(), is(false)); + } + { + // simulate elapsed time and compute decay + var minInternalMillis = SharedBlobCacheService.SHARED_CACHE_MIN_TIME_DELTA_SETTING.getDefault(Settings.EMPTY).millis(); + relativeTimeInMillis.addAndGet(minInternalMillis * 2); + cacheService.computeDecay(); + + // fetch one more region should evict an old cache entry + final var cacheKey = generateCacheKey(); + assertEquals(0, cacheService.freeRegionCount()); + long blobLength = randomLongBetween(1L, regionSize); + AtomicLong bytesRead = new AtomicLong(0L); + final PlainActionFuture<Boolean> future = new PlainActionFuture<>(); + cacheService.maybeFetchRegion(cacheKey, 0, blobLength, (channel, channelPos, relativePos, length, progressUpdater) -> { + bytesRead.addAndGet(length); + progressUpdater.accept(length); + }, future); + + var fetched = future.get(10, TimeUnit.SECONDS); + assertThat("Region has been fetched", fetched, is(true)); + assertEquals(blobLength, bytesRead.get()); + assertEquals(0, cacheService.freeRegionCount()); + } + } + + threadPool.shutdown(); + } + + public void testPopulate() throws Exception { + final long regionSize = size(1L); + Settings settings = Settings.builder() + .put(NODE_NAME_SETTING.getKey(), "node") + .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep()) + .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep()) + .put("path.home", createTempDir()) + .build(); + + final AtomicLong relativeTimeInMillis = new AtomicLong(0L); + final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue(); + try ( + NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); + var cacheService = new SharedBlobCacheService<>( + environment, + settings, + taskQueue.getThreadPool(), + ThreadPool.Names.GENERIC, + ThreadPool.Names.GENERIC, + BlobCacheMetrics.NOOP, + relativeTimeInMillis::get + ) + ) { + final var cacheKey = generateCacheKey(); + final var blobLength = size(12L); + + // start populating the first region + var entry = cacheService.get(cacheKey, blobLength, 0); + AtomicLong
bytesWritten = new AtomicLong(0L); + final PlainActionFuture<Boolean> future1 = new PlainActionFuture<>(); + entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, relativePos, length, progressUpdater) -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + }, taskQueue.getThreadPool().generic(), future1); + + assertThat(future1.isDone(), is(false)); + assertThat(taskQueue.hasRunnableTasks(), is(true)); + + // start populating the second region + entry = cacheService.get(cacheKey, blobLength, 1); + final PlainActionFuture<Boolean> future2 = new PlainActionFuture<>(); + entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, relativePos, length, progressUpdater) -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + }, taskQueue.getThreadPool().generic(), future2); + + // start populating again the first region, listener should be called immediately + entry = cacheService.get(cacheKey, blobLength, 0); + final PlainActionFuture<Boolean> future3 = new PlainActionFuture<>(); + entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, relativePos, length, progressUpdater) -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + }, taskQueue.getThreadPool().generic(), future3); + + assertThat(future3.isDone(), is(true)); + var written = future3.get(10L, TimeUnit.SECONDS); + assertThat(written, is(false)); + + taskQueue.runAllRunnableTasks(); + + written = future1.get(10L, TimeUnit.SECONDS); + assertThat(future1.isDone(), is(true)); + assertThat(written, is(true)); + written = future2.get(10L, TimeUnit.SECONDS); + assertThat(future2.isDone(), is(true)); + assertThat(written, is(true)); + } + } + private void assertThatNonPositiveRecoveryRangeSizeRejected(Setting<ByteSizeValue> setting) { final String value = randomFrom(ByteSizeValue.MINUS_ONE, ByteSizeValue.ZERO).getStringRep(); final Settings settings = Settings.builder() diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 7e0e2d1493417..4a3a92aa80bc8 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -137,7 +137,7 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E public static final String CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY = "remote_cluster_name"; public static final String REQUESTED_OPS_MISSING_METADATA_KEY = "es.requested_operations_missing"; - public static final TransportVersion TRANSPORT_VERSION_ACTION_WITH_SHARD_ID = TransportVersions.V_8_500_020; + public static final TransportVersion TRANSPORT_VERSION_ACTION_WITH_SHARD_ID = TransportVersions.V_8_9_X; private final boolean enabled; private final Settings settings; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index ea4bc8c92047a..4ce64bc41d6a1 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -15,12 +15,12 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import
org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.client.internal.Client; @@ -461,8 +461,8 @@ protected final Index resolveFollowerIndex(String index) { return new Index(index, uuid); } - protected final RefreshResponse refresh(Client client, String... indices) { - RefreshResponse actionGet = client.admin().indices().prepareRefresh(indices).get(); + protected final BroadcastResponse refresh(Client client, String... indices) { + BroadcastResponse actionGet = client.admin().indices().prepareRefresh(indices).get(); assertNoFailures(actionGet); return actionGet; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java index dc5169648e0cd..45b9d557b72b3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java @@ -23,7 +23,7 @@ public class EnterpriseSearchFeatureSetUsage extends XPackFeatureSet.Usage { static final TransportVersion BEHAVIORAL_ANALYTICS_TRANSPORT_VERSION = TransportVersions.V_8_8_1; - static final TransportVersion QUERY_RULES_TRANSPORT_VERSION = TransportVersions.V_8_500_061; + static final TransportVersion QUERY_RULES_TRANSPORT_VERSION = TransportVersions.V_8_10_X; public static final String SEARCH_APPLICATIONS = "search_applications"; public static final String ANALYTICS_COLLECTIONS = "analytics_collections"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java index edac3498ca4e4..91cce4126d3a3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java @@ -48,7 +48,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_8_500_020; + return TransportVersions.V_8_9_X; } @Override @@ -112,7 +112,7 @@ public LifecycleStats( } public static LifecycleStats read(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { return new LifecycleStats(in.readVLong(), in.readVLong(), in.readVLong(), in.readDouble(), in.readBoolean()); } else { return INITIAL; @@ -121,7 +121,7 @@ public static LifecycleStats read(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { 
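// (Wire-format pattern behind the V_8_500_020 -> V_8_9_X renames in this PR: read and write sides must gate on the same transport version so a node that never wrote these fields never tries to read them; the matching read-side check appears in LifecycleStats.read above: // if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { ... read the extra fields ... })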
out.writeVLong(dataStreamsWithLifecyclesCount); out.writeVLong(minRetentionMillis); out.writeVLong(maxRetentionMillis); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java index ef93ab914f08f..dcaf5057e9d43 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java @@ -69,7 +69,7 @@ public Request() {} public Request(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X) && in.readBoolean()) { this.indexStartTimeMillis = in.readVLong(); this.indexEndTimeMillis = in.readVLong(); } else { @@ -132,7 +132,7 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeBoolean(true); out.writeVLong(indexStartTimeMillis); out.writeVLong(indexEndTimeMillis); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java index 2700ed844d063..8d1d4aec6e7c2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java @@ -144,7 +144,7 @@ public DownsampleShardStatus(StreamInput in) throws IOException { numSent = in.readLong(); numIndexed = in.readLong(); numFailed = in.readLong(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X) && in.readBoolean()) { totalShardDocCount = in.readVLong(); lastSourceTimestamp = in.readVLong(); lastTargetTimestamp = in.readVLong(); @@ -254,7 +254,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(numSent); out.writeLong(numIndexed); out.writeLong(numFailed); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeBoolean(true); out.writeVLong(totalShardDocCount); out.writeVLong(lastSourceTimestamp); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java index 818b45c2b5d00..59ff38b317327 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java @@ -91,7 +91,7 @@ public DownsampleAction(final DateHistogramInterval fixedInterval, final TimeVal public DownsampleAction(StreamInput in) throws IOException { this( new DateHistogramInterval(in), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X) ? 
TimeValue.parseTimeValue(in.readString(), WAIT_TIMEOUT_FIELD.getPreferredName()) : DEFAULT_WAIT_TIMEOUT ); @@ -100,7 +100,7 @@ public DownsampleAction(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { fixedInterval.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeString(waitTimeout.getStringRep()); } else { out.writeString(DEFAULT_WAIT_TIMEOUT.getStringRep()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java index 12fba46e40689..c316e130ecb81 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java @@ -79,7 +79,7 @@ public Request(StreamInput in) throws IOException { advanceTime = in.readOptionalString(); skipTime = in.readOptionalString(); waitForNormalization = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { refreshRequired = in.readBoolean(); } } @@ -93,7 +93,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(advanceTime); out.writeOptionalString(skipTime); out.writeBoolean(waitForNormalization); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeBoolean(refreshRequired); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index e8b0041875b07..d819f7d846843 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -334,7 +334,7 @@ public boolean equals(Object obj) { } } - private QueryPage jobsStats; + private final QueryPage jobsStats; public Response(QueryPage jobsStats) { super(Collections.emptyList(), Collections.emptyList()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java index 5341efeec1094..6f64c41c8dee9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java @@ -91,7 +91,7 @@ public Request(StreamInput in) throws IOException { this.part = in.readVInt(); this.totalDefinitionLength = in.readVLong(); this.totalParts = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { this.allowOverwriting = in.readBoolean(); } else { this.allowOverwriting = false; @@ -148,7 +148,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(part); out.writeVLong(totalDefinitionLength); out.writeVInt(totalParts); - if 
(out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeBoolean(allowOverwriting); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java index c153cbc2c039b..ed988f952bc97 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java @@ -86,12 +86,12 @@ public Request(StreamInput in) throws IOException { } else { this.merges = List.of(); } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { this.scores = in.readCollectionAsList(StreamInput::readDouble); } else { this.scores = List.of(); } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { this.allowOverwriting = in.readBoolean(); } else { this.allowOverwriting = false; @@ -136,10 +136,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { out.writeStringCollection(merges); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeCollection(scores, StreamOutput::writeDouble); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeBoolean(allowOverwriting); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java index 883c94093a2c5..2254959242eab 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java @@ -66,7 +66,7 @@ public FlushAcknowledgement(String id, Instant lastFinalizedBucketEnd, Boolean r public FlushAcknowledgement(StreamInput in) throws IOException { id = in.readString(); lastFinalizedBucketEnd = in.readOptionalInstant(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { refreshRequired = in.readBoolean(); } else { refreshRequired = true; @@ -77,7 +77,7 @@ public FlushAcknowledgement(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeOptionalInstant(lastFinalizedBucketEnd); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeBoolean(refreshRequired); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java index 7596fe75b4173..10b7730b58c9b 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java @@ -134,12 +134,12 @@ public AsyncStatusResponse(StreamInput in) throws IOException { this.skippedShards = in.readVInt(); this.failedShards = in.readVInt(); this.completionStatus = (this.isRunning == false) ? RestStatus.readFrom(in) : null; - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { this.clusters = in.readOptionalWriteable(SearchResponse.Clusters::new); } else { this.clusters = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { this.completionTimeMillis = in.readOptionalVLong(); } else { this.completionTimeMillis = null; @@ -160,11 +160,11 @@ public void writeTo(StreamOutput out) throws IOException { if (isRunning == false) { RestStatus.writeTo(out, completionStatus); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { // optional since only CCS uses it; it is null for local-only searches out.writeOptionalWriteable(clusters); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeOptionalVLong(completionTimeMillis); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java index fbc08a0dee8aa..bdb721df2ffd9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; +import org.elasticsearch.xpack.core.security.action.user.QueryUserResponse; /** * A collection of action types for the Security plugin that need to be available in xpack.core.security and thus cannot be stored @@ -20,4 +21,6 @@ public final class ActionTypes { public static final ActionType<ActionResponse.Empty> RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION = ActionType.localOnly( "cluster:admin/xpack/security/remote_cluster_credentials/reload" ); + + public static final ActionType<QueryUserResponse> QUERY_USER_ACTION = ActionType.localOnly("cluster:admin/xpack/security/user/query"); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java index e57570ce7385b..5753fa3b4ad7a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java @@ -46,7 +46,7 @@ */ public final class ApiKey implements ToXContentObject, Writeable { - public static final TransportVersion CROSS_CLUSTER_KEY_VERSION = TransportVersions.V_8_500_020; + public static final TransportVersion CROSS_CLUSTER_KEY_VERSION = TransportVersions.V_8_9_X; public enum Type { /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java index 71e0c98fb0012..a8b14795e2dd8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java @@ -26,7 +26,7 @@ */ public final class GetApiKeyRequest extends ActionRequest { - static TransportVersion API_KEY_ACTIVE_ONLY_PARAM_TRANSPORT_VERSION = TransportVersions.V_8_500_061; + static TransportVersion API_KEY_ACTIVE_ONLY_PARAM_TRANSPORT_VERSION = TransportVersions.V_8_10_X; private final String realmName; private final String userName; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java index 73ee4d1f27299..dfad1fe376706 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java @@ -20,7 +20,7 @@ public class AuthenticateResponse extends ActionResponse implements ToXContent { - public static final TransportVersion VERSION_OPERATOR_FIELD = TransportVersions.V_8_500_061; + public static final TransportVersion VERSION_OPERATOR_FIELD = TransportVersions.V_8_10_X; private final Authentication authentication; private final boolean operator; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequest.java new file mode 100644 index 0000000000000..6db7e93b66eda --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequest.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request for the query Users API.
+ * Model for API requests to the query users API + */ +public final class QueryUserRequest extends ActionRequest { + + @Nullable + private final QueryBuilder queryBuilder; + @Nullable + private final Integer from; + @Nullable + private final Integer size; + @Nullable + private final List<FieldSortBuilder> fieldSortBuilders; + @Nullable + private final SearchAfterBuilder searchAfterBuilder; + + public QueryUserRequest() { + this(null); + } + + public QueryUserRequest(QueryBuilder queryBuilder) { + this(queryBuilder, null, null, null, null); + } + + public QueryUserRequest( + @Nullable QueryBuilder queryBuilder, + @Nullable Integer from, + @Nullable Integer size, + @Nullable List<FieldSortBuilder> fieldSortBuilders, + @Nullable SearchAfterBuilder searchAfterBuilder + ) { + this.queryBuilder = queryBuilder; + this.from = from; + this.size = size; + this.fieldSortBuilders = fieldSortBuilders; + this.searchAfterBuilder = searchAfterBuilder; + } + + public QueryBuilder getQueryBuilder() { + return queryBuilder; + } + + public Integer getFrom() { + return from; + } + + public Integer getSize() { + return size; + } + + public List<FieldSortBuilder> getFieldSortBuilders() { + return fieldSortBuilders; + } + + public SearchAfterBuilder getSearchAfterBuilder() { + return searchAfterBuilder; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (from != null && from < 0) { + validationException = addValidationError("[from] parameter cannot be negative but was [" + from + "]", validationException); + } + if (size != null && size < 0) { + validationException = addValidationError("[size] parameter cannot be negative but was [" + size + "]", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + TransportAction.localOnly(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserResponse.java new file mode 100644 index 0000000000000..57d156cf05ca0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserResponse.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Objects; + +/** + * Response for the query Users API.
    + * Model used to serialize information about the Users that were found. + */ +public final class QueryUserResponse extends ActionResponse implements ToXContentObject { + + private final long total; + private final Item[] items; + + public QueryUserResponse(long total, Collection items) { + this.total = total; + Objects.requireNonNull(items, "items must be provided"); + this.items = items.toArray(new Item[0]); + } + + public static QueryUserResponse emptyResponse() { + return new QueryUserResponse(0, Collections.emptyList()); + } + + public long getTotal() { + return total; + } + + public Item[] getItems() { + return items; + } + + public int getCount() { + return items.length; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().field("total", total).field("count", items.length).array("users", (Object[]) items); + return builder.endObject(); + } + + @Override + public String toString() { + return "QueryUsersResponse{" + "total=" + total + ", items=" + Arrays.toString(items) + '}'; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + TransportAction.localOnly(); + } + + public record Item(User user, @Nullable Object[] sortValues) implements ToXContentObject { + + @Override + public Object[] sortValues() { + return sortValues; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + user.innerToXContent(builder); + if (sortValues != null && sortValues.length > 0) { + builder.array("_sort", sortValues); + } + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return "Item{" + "user=" + user + ", sortValues=" + Arrays.toString(sortValues) + '}'; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java index f39eca877432c..2857cbfd1bdd2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -52,7 +52,7 @@ */ public class RoleDescriptor implements ToXContentObject, Writeable { - public static final TransportVersion WORKFLOWS_RESTRICTION_VERSION = TransportVersions.V_8_500_020; + public static final TransportVersion WORKFLOWS_RESTRICTION_VERSION = TransportVersions.V_8_9_X; public static final String ROLE_TYPE = "role"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java index f93599cdb98cc..ba6bca802070a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java @@ -29,6 +29,7 @@ import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction; import org.elasticsearch.xpack.core.ilm.action.GetStatusAction; import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; import 
org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; @@ -234,6 +235,7 @@ public class ClusterPrivilegeResolver { GetServiceAccountAction.NAME, GetServiceAccountCredentialsAction.NAME + "*", GetUsersAction.NAME, + ActionTypes.QUERY_USER_ACTION.name(), GetUserPrivilegesAction.NAME, // normally authorized under the "same-user" authz check, but added here for uniformity HasPrivilegesAction.NAME, GetSecuritySettingsAction.NAME diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeStepTests.java index 962e789cac7d6..b16983c6a7ac6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeStepTests.java @@ -10,9 +10,9 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -70,13 +70,13 @@ public void testPerformActionComplete() throws Exception { Step.StepKey stepKey = randomStepKey(); StepKey nextStepKey = randomStepKey(); int maxNumSegments = randomIntBetween(1, 10); - ForceMergeResponse forceMergeResponse = Mockito.mock(ForceMergeResponse.class); + BroadcastResponse forceMergeResponse = Mockito.mock(BroadcastResponse.class); Mockito.when(forceMergeResponse.getStatus()).thenReturn(RestStatus.OK); Mockito.doAnswer(invocationOnMock -> { ForceMergeRequest request = (ForceMergeRequest) invocationOnMock.getArguments()[0]; assertThat(request.maxNumSegments(), equalTo(maxNumSegments)); @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onResponse(forceMergeResponse); return null; }).when(indicesClient).forceMerge(any(), any()); @@ -95,7 +95,7 @@ public void testPerformActionThrowsException() { Step.StepKey stepKey = randomStepKey(); StepKey nextStepKey = randomStepKey(); int maxNumSegments = randomIntBetween(1, 10); - ForceMergeResponse forceMergeResponse = Mockito.mock(ForceMergeResponse.class); + BroadcastResponse forceMergeResponse = Mockito.mock(BroadcastResponse.class); Mockito.when(forceMergeResponse.getStatus()).thenReturn(RestStatus.OK); Mockito.doAnswer(invocationOnMock -> { ForceMergeRequest request = (ForceMergeRequest) invocationOnMock.getArguments()[0]; @@ -103,7 +103,7 @@ public void testPerformActionThrowsException() { assertThat(request.indices()[0], equalTo(indexMetadata.getIndex().getName())); assertThat(request.maxNumSegments(), equalTo(maxNumSegments)); @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onFailure(exception); return null; }).when(indicesClient).forceMerge(any(), any()); @@ -126,7 
+126,7 @@ public void testForcemergeFailsOnSomeShards() { .numberOfReplicas(randomIntBetween(0, 5)) .build(); Index index = indexMetadata.getIndex(); - ForceMergeResponse forceMergeResponse = Mockito.mock(ForceMergeResponse.class); + BroadcastResponse forceMergeResponse = Mockito.mock(BroadcastResponse.class); Mockito.when(forceMergeResponse.getTotalShards()).thenReturn(numberOfShards); Mockito.when(forceMergeResponse.getFailedShards()).thenReturn(numberOfShards - 1); Mockito.when(forceMergeResponse.getStatus()).thenReturn(RestStatus.BAD_REQUEST); @@ -143,7 +143,7 @@ public void testForcemergeFailsOnSomeShards() { Mockito.doAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onResponse(forceMergeResponse); return null; }).when(indicesClient).forceMerge(any(), any()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkStepTests.java index 92ba5d2ad4efb..db8ac28dd1b98 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkStepTests.java @@ -7,9 +7,9 @@ package org.elasticsearch.xpack.core.ilm; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; -import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasMetadata; @@ -100,7 +100,7 @@ public void testPerformAction() throws Exception { Mockito.doAnswer(invocation -> { ResizeRequest request = (ResizeRequest) invocation.getArguments()[0]; @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[1]; assertThat(request.getSourceIndex(), equalTo(sourceIndexMetadata.getIndex().getName())); assertThat(request.getTargetIndexRequest().aliases(), equalTo(Collections.emptySet())); @@ -119,7 +119,7 @@ public void testPerformAction() throws Exception { ); } request.setMaxPrimaryShardSize(step.getMaxPrimaryShardSize()); - listener.onResponse(new ResizeResponse(true, true, sourceIndexMetadata.getIndex().getName())); + listener.onResponse(new CreateIndexResponse(true, true, sourceIndexMetadata.getIndex().getName())); return null; }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any()); @@ -181,8 +181,8 @@ public void testPerformActionIsCompleteForUnAckedRequests() throws Exception { Mockito.doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; - listener.onResponse(new ResizeResponse(false, false, indexMetadata.getIndex().getName())); + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(new CreateIndexResponse(false, false, indexMetadata.getIndex().getName())); return null; }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any()); diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java index a369219bd7c3c..6d85e90dc3108 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java @@ -53,7 +53,7 @@ protected Writeable.Reader instanceReader() { @Override protected Request mutateInstanceForVersion(Request instance, TransportVersion version) { - if (version.before(TransportVersions.V_8_500_020)) { + if (version.before(TransportVersions.V_8_9_X)) { instance.setRefreshRequired(true); } return instance; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java index ee304f966c9b4..7f37ff85f1fda 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java @@ -72,7 +72,7 @@ protected Writeable.Reader instanceReader() { @Override protected Request mutateInstanceForVersion(Request instance, TransportVersion version) { - if (version.before(TransportVersions.V_8_500_061)) { + if (version.before(TransportVersions.V_8_10_X)) { return new Request( instance.getModelId(), instance.getDefinition(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequestTests.java new file mode 100644 index 0000000000000..e7d8ef0b65e39 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequestTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.nullValue; + +public class QueryUserRequestTests extends ESTestCase { + public void testValidate() { + final QueryUserRequest request1 = new QueryUserRequest( + null, + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + null, + null + ); + assertThat(request1.validate(), nullValue()); + + final QueryUserRequest request2 = new QueryUserRequest( + null, + randomIntBetween(Integer.MIN_VALUE, -1), + randomIntBetween(0, Integer.MAX_VALUE), + null, + null + ); + assertThat(request2.validate().getMessage(), containsString("[from] parameter cannot be negative")); + + final QueryUserRequest request3 = new QueryUserRequest( + null, + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(Integer.MIN_VALUE, -1), + null, + null + ); + assertThat(request3.validate().getMessage(), containsString("[size] parameter cannot be negative")); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java index bddc30b8d7b83..21827c4b9a373 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; import org.elasticsearch.xpack.core.enrich.action.GetEnrichPolicyAction; import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheAction; import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; @@ -281,6 +282,7 @@ public void testReadSecurityPrivilege() { GetServiceAccountAction.NAME, GetServiceAccountCredentialsAction.NAME, GetUsersAction.NAME, + ActionTypes.QUERY_USER_ACTION.name(), HasPrivilegesAction.NAME, GetUserPrivilegesAction.NAME, GetSecuritySettingsAction.NAME @@ -339,16 +341,11 @@ public void testManageUserProfilePrivilege() { "cluster:admin/xpack/security/role/get", "cluster:admin/xpack/security/role/delete" ); - verifyClusterActionDenied( - ClusterPrivilegeResolver.MANAGE_USER_PROFILE, - "cluster:admin/xpack/security/role/put", - "cluster:admin/xpack/security/role/get", - "cluster:admin/xpack/security/role/delete" - ); verifyClusterActionDenied( ClusterPrivilegeResolver.MANAGE_USER_PROFILE, "cluster:admin/xpack/security/user/put", "cluster:admin/xpack/security/user/get", + "cluster:admin/xpack/security/user/query", "cluster:admin/xpack/security/user/delete" ); verifyClusterActionDenied( diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json index b3d6dc3936d59..233c170890d40 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json @@ -101,6 +101,13 @@ "type": "long" } } + }, + "total_data_set_size": { + "properties": { + "bytes": { + "type": 
"long" + } + } } } } @@ -623,6 +630,13 @@ "type": "long" } } + }, + "total_data_set_size": { + "properties": { + "bytes": { + "type": "long" + } + } } } }, @@ -1253,6 +1267,9 @@ "properties": { "size_in_bytes": { "type": "long" + }, + "total_data_set_size_in_bytes": { + "type": "long" } } }, @@ -1410,6 +1427,9 @@ }, "size_in_bytes": { "type": "long" + }, + "total_data_set_size_in_bytes": { + "type": "long" } } }, @@ -1704,6 +1724,13 @@ "type": "long" } } + }, + "total_data_set_size": { + "properties": { + "bytes": { + "type": "long" + } + } } } }, @@ -1828,6 +1855,13 @@ "type": "long" } } + }, + "total_data_set_size": { + "properties": { + "bytes": { + "type": "long" + } + } } } }, diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java index 4858b640dcc79..34558c289b555 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java @@ -155,8 +155,8 @@ static DeprecationIssue checkDiskLowWatermark( ) { DiskUsage usage = clusterInfo.getNodeMostAvailableDiskUsages().get(nodeId); if (usage != null) { - long freeBytes = usage.getFreeBytes(); - long totalBytes = usage.getTotalBytes(); + long freeBytes = usage.freeBytes(); + long totalBytes = usage.totalBytes(); if (exceedsLowWatermark(nodeSettings, clusterSettings, freeBytes, totalBytes) || exceedsLowWatermark(dynamicSettings, clusterSettings, freeBytes, totalBytes)) { return new DeprecationIssue( diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java index 34b7d3c90b267..813dcc8c8d5a4 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java @@ -91,7 +91,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_8_500_061; + return TransportVersions.V_8_10_X; } @Override diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index 5cceffd0f4818..f3bb43b9a3f38 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -17,12 +17,12 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.ActionFilters; import 
org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; import org.elasticsearch.client.internal.Client; @@ -848,7 +848,7 @@ public void onFailure(Exception e) { /** * Updates the downsample target index metadata (task status) */ - class RefreshDownsampleIndexActionListener implements ActionListener<RefreshResponse> { + class RefreshDownsampleIndexActionListener implements ActionListener<BroadcastResponse> { private final ActionListener actionListener; private final TaskId parentTask; @@ -868,7 +868,7 @@ class RefreshDownsampleIndexActionListener implements ActionListener criteriaValues; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java index fcd0f6be8fbcb..f3bc07387512f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java @@ -27,7 +27,7 @@ */ public class QueryRulesetListItem implements Writeable, ToXContentObject { - public static final TransportVersion EXPANDED_RULESET_COUNT_TRANSPORT_VERSION = TransportVersions.V_8_500_061; + public static final TransportVersion EXPANDED_RULESET_COUNT_TRANSPORT_VERSION = TransportVersions.V_8_10_X; public static final ParseField RULESET_ID_FIELD = new ParseField("ruleset_id"); public static final ParseField RULE_TOTAL_COUNT_FIELD = new ParseField("rule_total_count"); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java index b23ed92a5d9b8..3882b6c61bb2c 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java @@ -73,7 +73,7 @@ public class RuleQueryBuilder extends AbstractQueryBuilder<RuleQueryBuilder> { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_8_500_061; + return TransportVersions.V_8_10_X; } public RuleQueryBuilder(QueryBuilder organicQuery, Map<String, Object> matchCriteria, String rulesetId) { diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java index c0a286cc5c464..414705aff0b79 100644 --- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction; import
org.elasticsearch.action.support.ActionFilter; @@ -258,7 +257,7 @@ protected TaskId cancelTaskWithXOpaqueId(String id, String action) { TaskId taskId = findTaskWithXOpaqueId(id, action); assertNotNull(taskId); logger.trace("Cancelling task " + taskId); - CancelTasksResponse response = clusterAdmin().prepareCancelTasks().setTargetTaskId(taskId).get(); + ListTasksResponse response = clusterAdmin().prepareCancelTasks().setTargetTaskId(taskId).get(); assertThat(response.getTasks(), hasSize(1)); assertThat(response.getTasks().get(0).action(), equalTo(action)); logger.trace("Task is cancelled " + taskId); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java index 2d7a330560fcc..f9f9238b6c4ab 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java @@ -281,7 +281,7 @@ private Event(StreamInput in) throws IOException { } else { fetchFields = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { missing = in.readBoolean(); } else { missing = index.isEmpty(); @@ -304,7 +304,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(fetchFields, StreamOutput::writeWriteable); } } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { // for BWC, 8.9.1+ does not have "missing" attribute, but it considers events with an empty index "" as missing events // see https://github.com/elastic/elasticsearch/pull/98130 out.writeBoolean(missing); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java index ecb8ce633d985..011b0d09fd8c5 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java @@ -182,7 +182,8 @@ public static SearchRequest prepareRequest(SearchSourceBuilder source, boolean i } public static List searchHits(SearchResponse response) { - return Arrays.asList(response.getHits().getHits()); + // TODO remove unpooled usage + return Arrays.asList(response.getHits().asUnpooled().getHits()); } /** diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java index edbeb3d0a0d8c..255e94d6bda34 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java @@ -289,7 +289,7 @@ private List mutateEvents(List original, TransportVersion version) e.id(), e.source(), version.onOrAfter(TransportVersions.V_7_13_0) ? e.fetchFields() : null, - version.onOrAfter(TransportVersions.V_8_500_061) ? e.missing() : e.index().isEmpty() + version.onOrAfter(TransportVersions.V_8_10_X) ? 
e.missing() : e.index().isEmpty() ) ); } @@ -299,10 +299,10 @@ private List mutateEvents(List original, TransportVersion version) public void testEmptyIndexAsMissingEvent() throws IOException { Event event = new Event("", "", new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), null); BytesStreamOutput out = new BytesStreamOutput(); - out.setTransportVersion(TransportVersions.V_8_500_020);// 8.9.1 + out.setTransportVersion(TransportVersions.V_8_9_X);// 8.9.1 event.writeTo(out); ByteArrayStreamInput in = new ByteArrayStreamInput(out.bytes().array()); - in.setTransportVersion(TransportVersions.V_8_500_020); + in.setTransportVersion(TransportVersions.V_8_9_X); Event event2 = Event.readFrom(in); assertTrue(event2.missing()); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java index f391e9bdae84b..7bb6a228f6e48 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java @@ -74,14 +74,14 @@ public void query(QueryRequest r, ActionListener l) { } long sortValue = implicitTiebreakerValues.get(ordinal); - SearchHit searchHit = new SearchHit(ordinal, String.valueOf(ordinal)); + SearchHit searchHit = SearchHit.unpooled(ordinal, String.valueOf(ordinal)); searchHit.sortValues( new SearchSortValues( new Long[] { (long) ordinal, sortValue }, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW } ) ); - SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); + SearchHits searchHits = SearchHits.unpooled(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); ActionListener.respondAndRelease( l, new SearchResponse(searchHits, null, null, false, false, null, 0, null, 0, 1, 0, 0, null, Clusters.EMPTY) @@ -94,7 +94,7 @@ public void fetchHits(Iterable> refs, ActionListener ref : refs) { List hits = new ArrayList<>(ref.size()); for (HitReference hitRef : ref) { - hits.add(new SearchHit(-1, hitRef.id())); + hits.add(SearchHit.unpooled(-1, hitRef.id())); } searchHits.add(hits); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java index eb417570cb4a7..a8ed842e94c44 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java @@ -188,7 +188,7 @@ static class EventsAsHits { Map documentFields = new HashMap<>(); documentFields.put(KEY_FIELD_NAME, new DocumentField(KEY_FIELD_NAME, Collections.singletonList(value.v1()))); // save the timestamp both as docId (int) and as id (string) - SearchHit searchHit = new SearchHit(entry.getKey(), entry.getKey().toString()); + SearchHit searchHit = SearchHit.unpooled(entry.getKey(), entry.getKey().toString()); searchHit.addDocumentFields(documentFields, Map.of()); hits.add(searchHit); } @@ -215,7 +215,7 @@ public void query(QueryRequest r, ActionListener l) { Map> evs = ordinal != Integer.MAX_VALUE ? 
events.get(ordinal) : emptyMap(); EventsAsHits eah = new EventsAsHits(evs); - SearchHits searchHits = new SearchHits( + SearchHits searchHits = SearchHits.unpooled( eah.hits.toArray(SearchHits.EMPTY), new TotalHits(eah.hits.size(), Relation.EQUAL_TO), 0.0f @@ -232,7 +232,7 @@ public void fetchHits(Iterable> refs, ActionListener ref : refs) { List hits = new ArrayList<>(ref.size()); for (HitReference hitRef : ref) { - hits.add(new SearchHit(-1, hitRef.id())); + hits.add(SearchHit.unpooled(-1, hitRef.id())); } searchHits.add(hits); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index 9141555fcd613..b880ec4b06926 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.composite.InternalComposite; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -171,8 +172,8 @@ public void fetchHits(Iterable> refs, ActionListener searchHits = new ArrayList<>(); - searchHits.add(new SearchHit(1, String.valueOf(1))); - searchHits.add(new SearchHit(2, String.valueOf(2))); + searchHits.add(SearchHit.unpooled(1, String.valueOf(1))); + searchHits.add(SearchHit.unpooled(2, String.valueOf(2))); return new Sample(new SequenceKey(randomAlphaOfLength(10)), searchHits); } @@ -224,7 +225,7 @@ void handleSearchRequest(ActionListener asSearchHitsList(Integer... 
docIds) { } List searchHits = new ArrayList<>(docIds.length); for (Integer docId : docIds) { - searchHits.add(new SearchHit(docId, docId.toString())); + searchHits.add(SearchHit.unpooled(docId, docId.toString())); } return searchHits; diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/CriterionOrdinalExtractionTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/CriterionOrdinalExtractionTests.java index b995693458095..f62100a98b066 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/CriterionOrdinalExtractionTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/CriterionOrdinalExtractionTests.java @@ -153,7 +153,7 @@ private SearchHit searchHit(Object timeValue, Object tiebreakerValue, Supplier fields = new HashMap<>(); fields.put(tsField, new DocumentField(tsField, singletonList(timeValue))); fields.put(tbField, new DocumentField(tsField, singletonList(tiebreakerValue))); - SearchHit searchHit = new SearchHit(randomInt(), randomAlphaOfLength(10)); + SearchHit searchHit = SearchHit.unpooled(randomInt(), randomAlphaOfLength(10)); searchHit.addDocumentFields(fields, Map.of()); searchHit.sortValues(searchSortValues.get()); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java index 9c9bbfcdc5127..0bdb88592ce0f 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java @@ -135,7 +135,7 @@ public void fetchHits(Iterable> refs, ActionListener ref : refs) { List hits = new ArrayList<>(ref.size()); for (HitReference hitRef : ref) { - hits.add(new SearchHit(-1, hitRef.id())); + hits.add(SearchHit.unpooled(-1, hitRef.id())); } searchHits.add(hits); } @@ -236,12 +236,12 @@ protected void @SuppressWarnings("unchecked") void handleSearchRequest(ActionListener listener, SearchRequest searchRequest) { int ordinal = searchRequest.source().terminateAfter(); - SearchHit searchHit = new SearchHit(ordinal, String.valueOf(ordinal)); + SearchHit searchHit = SearchHit.unpooled(ordinal, String.valueOf(ordinal)); searchHit.sortValues( new SearchSortValues(new Long[] { (long) ordinal, 1L }, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }) ); - SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 0.0f); + SearchHits searchHits = SearchHits.unpooled(new SearchHit[] { searchHit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 0.0f); SearchResponse response = new SearchResponse( searchHits, null, diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java index 7ef2b95d982fb..3097fbbc7f04a 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java @@ -107,11 +107,11 @@ static class TestQueryClient implements QueryClient { @Override public void query(QueryRequest r, ActionListener l) { int ordinal = 
r.searchSource().terminateAfter(); - SearchHit searchHit = new SearchHit(ordinal, String.valueOf(ordinal)); + SearchHit searchHit = SearchHit.unpooled(ordinal, String.valueOf(ordinal)); searchHit.sortValues( new SearchSortValues(new Long[] { (long) ordinal, 1L }, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }) ); - SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); + SearchHits searchHits = SearchHits.unpooled(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); ActionListener.respondAndRelease( l, new SearchResponse(searchHits, null, null, false, false, null, 0, null, 0, 1, 0, 0, null, Clusters.EMPTY) @@ -124,7 +124,7 @@ public void fetchHits(Iterable> refs, ActionListener ref : refs) { List hits = new ArrayList<>(ref.size()); for (HitReference hitRef : ref) { - hits.add(new SearchHit(-1, hitRef.id())); + hits.add(SearchHit.unpooled(-1, hitRef.id())); } searchHits.add(hits); } @@ -425,12 +425,12 @@ private class SuccessfulESMockClient extends ESMockClient { @Override void handleSearchRequest(ActionListener listener, SearchRequest searchRequest) { int ordinal = searchRequest.source().terminateAfter(); - SearchHit searchHit = new SearchHit(ordinal, String.valueOf(ordinal)); + SearchHit searchHit = SearchHit.unpooled(ordinal, String.valueOf(ordinal)); searchHit.sortValues( new SearchSortValues(new Long[] { (long) ordinal, 1L }, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }) ); - SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); + SearchHits searchHits = SearchHits.unpooled(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); SearchResponse response = new SearchResponse( searchHits, null, @@ -477,11 +477,11 @@ void handleSearchRequest(ActionListener void handleSearchRequest(ActionListener params = new HashMap<>(); + params.put("wait_for_completion_timeout", "30m"); + params.put("id", id); + String dropNullColumns = original.getApiCallSection().getParams().get("drop_null_columns"); + if (dropNullColumns != null) { + params.put("drop_null_columns", dropNullColumns); + } ClientYamlTestResponse fetchResponse = executionContext.callApi( "esql.async_query_get", - Map.of("wait_for_completion_timeout", "30m", "id", id), + params, List.of(), original.getApiCallSection().getHeaders(), original.getApiCallSection().getNodeSelector() diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java index 3968c2f33fca8..8886951030c07 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java @@ -205,6 +205,10 @@ public static void assertData( expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> GEO.wkbToWkt((BytesRef) x)); } else if (expectedType == Type.CARTESIAN_POINT) { expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> CARTESIAN.wkbToWkt((BytesRef) x)); + } else if (expectedType == Type.GEO_SHAPE) { + expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> GEO.wkbToWkt((BytesRef) x)); + } else if (expectedType == Type.CARTESIAN_SHAPE) { + expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> CARTESIAN.wkbToWkt((BytesRef) x)); } else if (expectedType == Type.IP) { // 
convert BytesRef-packed IP to String, allowing subsequent comparison with what's expected expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> DocValueFormat.IP.format((BytesRef) x)); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index d49d5a964e944..4e0f0b8661631 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -61,6 +61,9 @@ public final class CsvTestUtils { private static final int MAX_WIDTH = 20; private static final CsvPreference CSV_SPEC_PREFERENCES = new CsvPreference.Builder('"', '|', "\r\n").build(); private static final String NULL_VALUE = "null"; + private static final char ESCAPE_CHAR = '\\'; + public static final String COMMA_ESCAPING_REGEX = "(?> loadPageFromCsv(URL source) throws Excep record CsvColumn(String name, Type type, BuilderWrapper builderWrapper) implements Releasable { void append(String stringValue) { - if (stringValue.contains(",")) {// multi-value field + if (stringValue.startsWith("\"") && stringValue.endsWith("\"")) { // string value + stringValue = stringValue.substring(1, stringValue.length() - 1).replace(ESCAPED_COMMA_SEQUENCE, ","); + } else if (stringValue.contains(",")) {// multi-value field builderWrapper().builder().beginPositionEntry(); String[] arrayOfValues = delimitedListToStringArray(stringValue, ","); @@ -229,6 +234,8 @@ public void close() { * Takes a csv String and converts it to a String array. Also, it recognizes an opening bracket "[" in one string and a closing "]" * in another string and it creates a single concatenated comma-separated String of all the values between the opening bracket entry * and the closing bracket entry. In other words, entries enclosed by "[]" are returned as a single element. + * + * Commas can be escaped with \ (backslash) character. */ static String[] multiValuesAwareCsvToStringArray(String csvLine, int lineNumber) { var mvCompressedEntries = new ArrayList(); @@ -237,14 +244,20 @@ static String[] multiValuesAwareCsvToStringArray(String csvLine, int lineNumber) int pos = 0; // current position in the csv String int commaPos; // current "," character position + int previousCommaPos = 0; while ((commaPos = csvLine.indexOf(",", pos)) != -1 || pos <= csvLine.length()) { + if (commaPos > 0 && csvLine.charAt(commaPos - 1) == ESCAPE_CHAR) {// skip the escaped comma + pos = commaPos + 1;// moving on to the next character after comma + continue; + } + boolean isLastElement = commaPos == -1; - String entry = csvLine.substring(pos, isLastElement ? csvLine.length() : commaPos).trim(); + String entry = csvLine.substring(previousCommaPos, isLastElement ? csvLine.length() : commaPos).trim(); if (entry.startsWith("[")) { if (previousMvValue != null || (isLastElement && entry.endsWith("]") == false)) { String message = "Error line [{}:{}]: Unexpected start of a multi-value field value; current token [{}], " + (isLastElement ? 
"no closing point" : "previous token [{}]"); - throw new IllegalArgumentException(format(message, lineNumber, pos, entry, previousMvValue)); + throw new IllegalArgumentException(format(message, lineNumber, previousCommaPos, entry, previousMvValue)); } if (entry.endsWith("]")) { if (entry.length() > 2) {// single-valued multivalue field :shrug: @@ -263,7 +276,7 @@ static String[] multiValuesAwareCsvToStringArray(String csvLine, int lineNumber) format( "Error line [{}:{}]: Unexpected end of a multi-value field value (no previous starting point); found [{}]", lineNumber, - pos, + previousCommaPos, entry ) ); @@ -279,8 +292,8 @@ static String[] multiValuesAwareCsvToStringArray(String csvLine, int lineNumber) format( "Error line [{}:{}]: Unexpected missing value in a multi-value column; found [{}]", lineNumber, - pos, - csvLine.substring(pos - 1) + previousCommaPos, + csvLine.substring(previousCommaPos - 1) ) ); } @@ -290,12 +303,22 @@ static String[] multiValuesAwareCsvToStringArray(String csvLine, int lineNumber) } } pos = 1 + (isLastElement ? csvLine.length() : commaPos);// break out of the loop if it reached its last element + previousCommaPos = pos; } return mvCompressedEntries.toArray(String[]::new); } public record ExpectedResults(List columnNames, List columnTypes, List> values) {} + /** + * The method loads a section of a .csv-spec file representing the results of executing the query of that section. + * It reads both the schema (field names and their types) and the row values. + * Values starting with an opening square bracket and ending with a closing square bracket are considered multi-values. Inside + * these multi-values, commas separate the individual values and escaped commas are allowed with a prefixed \ + * default \ (backslash) character. 
+ * @param csv a string representing the header and row values of a single query execution result + * @return data structure with column names, their types and values + */ public static ExpectedResults loadCsvSpecValues(String csv) { List columnNames; List columnTypes; @@ -338,13 +361,21 @@ public static ExpectedResults loadCsvSpecValues(String csv) { if (value.startsWith("[") ^ value.endsWith("]")) { throw new IllegalArgumentException("Incomplete multi-value (opening and closing square brackets) found " + value); } - if (value.contains(",") && value.startsWith("[")) {// commas outside a multi-value should be ok - List listOfMvValues = new ArrayList<>(); - for (String mvValue : delimitedListToStringArray(value.substring(1, value.length() - 1), ",")) { - listOfMvValues.add(columnTypes.get(i).convert(mvValue.trim())); + if (value.contains(",") && value.startsWith("[")) { + // split on commas but ignoring escaped commas + String[] multiValues = value.substring(1, value.length() - 1).split(COMMA_ESCAPING_REGEX); + if (multiValues.length > 0) { + List listOfMvValues = new ArrayList<>(); + for (String mvValue : multiValues) { + listOfMvValues.add(columnTypes.get(i).convert(mvValue.trim().replace(ESCAPED_COMMA_SEQUENCE, ","))); + } + rowValues.add(listOfMvValues); + } else { + rowValues.add(columnTypes.get(i).convert(value.replace(ESCAPED_COMMA_SEQUENCE, ","))); } - rowValues.add(listOfMvValues); } else { + // The value considered here is the one where any potential escaped comma is kept as is (with the escape char) + // TODO if we'd want escaped commas outside multi-values fields, we'd have to adjust this value here as well rowValues.add(columnTypes.get(i).convert(value)); } } @@ -392,7 +423,9 @@ public enum Type { ), BOOLEAN(Booleans::parseBoolean, Boolean.class), GEO_POINT(x -> x == null ? null : GEO.wktToWkb(x), BytesRef.class), - CARTESIAN_POINT(x -> x == null ? null : CARTESIAN.wktToWkb(x), BytesRef.class); + CARTESIAN_POINT(x -> x == null ? null : CARTESIAN.wktToWkb(x), BytesRef.class), + GEO_SHAPE(x -> x == null ? null : GEO.wktToWkb(x), BytesRef.class), + CARTESIAN_SHAPE(x -> x == null ? 
null : CARTESIAN.wktToWkb(x), BytesRef.class); private static final Map LOOKUP = new HashMap<>(); @@ -457,7 +490,7 @@ public static Type asType(ElementType elementType, Type actualType) { } private static Type bytesRefBlockType(Type actualType) { - if (actualType == GEO_POINT || actualType == CARTESIAN_POINT) { + if (actualType == GEO_POINT || actualType == CARTESIAN_POINT || actualType == GEO_SHAPE || actualType == CARTESIAN_SHAPE) { return actualType; } else { return KEYWORD; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 3df70b3b83d37..1e26a3df45419 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -42,8 +42,9 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.common.Strings.delimitedListToStringArray; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.esql.CsvTestUtils.COMMA_ESCAPING_REGEX; +import static org.elasticsearch.xpack.esql.CsvTestUtils.ESCAPED_COMMA_SEQUENCE; import static org.elasticsearch.xpack.esql.CsvTestUtils.multiValuesAwareCsvToStringArray; public class CsvTestsDataLoader { @@ -56,26 +57,29 @@ public class CsvTestsDataLoader { private static final TestsDataset CLIENT_IPS = new TestsDataset("clientips", "mapping-clientips.json", "clientips.csv"); private static final TestsDataset AIRPORTS = new TestsDataset("airports", "mapping-airports.json", "airports.csv"); private static final TestsDataset AIRPORTS_WEB = new TestsDataset("airports_web", "mapping-airports_web.json", "airports_web.csv"); + private static final TestsDataset COUNTRIES_BBOX = new TestsDataset( + "countries_bbox", + "mapping-countries_bbox.json", + "countries_bbox.csv" + ); + private static final TestsDataset COUNTRIES_BBOX_WEB = new TestsDataset( + "countries_bbox_web", + "mapping-countries_bbox_web.json", + "countries_bbox_web.csv" + ); - public static final Map CSV_DATASET_MAP = Map.of( - EMPLOYEES.indexName, - EMPLOYEES, - HOSTS.indexName, - HOSTS, - APPS.indexName, - APPS, - LANGUAGES.indexName, - LANGUAGES, - UL_LOGS.indexName, - UL_LOGS, - SAMPLE_DATA.indexName, - SAMPLE_DATA, - CLIENT_IPS.indexName, - CLIENT_IPS, - AIRPORTS.indexName, - AIRPORTS, - AIRPORTS_WEB.indexName, - AIRPORTS_WEB + public static final Map CSV_DATASET_MAP = Map.ofEntries( + Map.entry(EMPLOYEES.indexName, EMPLOYEES), + Map.entry(HOSTS.indexName, HOSTS), + Map.entry(APPS.indexName, APPS), + Map.entry(LANGUAGES.indexName, LANGUAGES), + Map.entry(UL_LOGS.indexName, UL_LOGS), + Map.entry(SAMPLE_DATA.indexName, SAMPLE_DATA), + Map.entry(CLIENT_IPS.indexName, CLIENT_IPS), + Map.entry(AIRPORTS.indexName, AIRPORTS), + Map.entry(AIRPORTS_WEB.indexName, AIRPORTS_WEB), + Map.entry(COUNTRIES_BBOX.indexName, COUNTRIES_BBOX), + Map.entry(COUNTRIES_BBOX_WEB.indexName, COUNTRIES_BBOX_WEB) ); private static final EnrichConfig LANGUAGES_ENRICH = new EnrichConfig("languages_policy", "enrich-policy-languages.json"); @@ -137,17 +141,33 @@ public static void main(String[] args) throws IOException { } try (RestClient client = builder.build()) { - loadDataSetIntoEs(client); + loadDataSetIntoEs(client, (restClient, indexName, indexMapping) -> { + Request request = new Request("PUT", "/" + indexName); + 
request.setJsonEntity("{\"mappings\":" + indexMapping + "}"); + restClient.performRequest(request); + }); } } + private static void loadDataSetIntoEs(RestClient client, IndexCreator indexCreator) throws IOException { + loadDataSetIntoEs(client, LogManager.getLogger(CsvTestsDataLoader.class), indexCreator); + } + public static void loadDataSetIntoEs(RestClient client) throws IOException { - loadDataSetIntoEs(client, LogManager.getLogger(CsvTestsDataLoader.class)); + loadDataSetIntoEs(client, (restClient, indexName, indexMapping) -> { + ESRestTestCase.createIndex(restClient, indexName, null, indexMapping, null); + }); } public static void loadDataSetIntoEs(RestClient client, Logger logger) throws IOException { + loadDataSetIntoEs(client, logger, (restClient, indexName, indexMapping) -> { + ESRestTestCase.createIndex(restClient, indexName, null, indexMapping, null); + }); + } + + private static void loadDataSetIntoEs(RestClient client, Logger logger, IndexCreator indexCreator) throws IOException { for (var dataSet : CSV_DATASET_MAP.values()) { - load(client, dataSet.indexName, "/" + dataSet.mappingFileName, "/" + dataSet.dataFileName, logger); + load(client, dataSet.indexName, "/" + dataSet.mappingFileName, "/" + dataSet.dataFileName, logger, indexCreator); } forceMerge(client, CSV_DATASET_MAP.keySet(), logger); for (var policy : ENRICH_POLICIES) { @@ -169,7 +189,14 @@ private static void loadEnrichPolicy(RestClient client, String policyName, Strin client.performRequest(request); } - private static void load(RestClient client, String indexName, String mappingName, String dataName, Logger logger) throws IOException { + private static void load( + RestClient client, + String indexName, + String mappingName, + String dataName, + Logger logger, + IndexCreator indexCreator + ) throws IOException { URL mapping = CsvTestsDataLoader.class.getResource(mappingName); if (mapping == null) { throw new IllegalArgumentException("Cannot find resource " + mappingName); @@ -178,14 +205,10 @@ private static void load(RestClient client, String indexName, String mappingName if (data == null) { throw new IllegalArgumentException("Cannot find resource " + dataName); } - createTestIndex(client, indexName, readTextFile(mapping)); + indexCreator.createIndex(client, indexName, readTextFile(mapping)); loadCsvData(client, indexName, data, CsvTestsDataLoader::createParser, logger); } - private static void createTestIndex(RestClient client, String indexName, String mapping) throws IOException { - ESRestTestCase.createIndex(client, indexName, null, mapping, null); - } - public static String readTextFile(URL resource) throws IOException { try (BufferedReader reader = TestUtils.reader(resource)) { StringBuilder b = new StringBuilder(); @@ -198,6 +221,20 @@ public static String readTextFile(URL resource) throws IOException { } @SuppressWarnings("unchecked") + /** + * Loads a classic csv file in an ES cluster using a RestClient. 
+ * The structure of the file is as follows: + * - commented lines should start with "//" + * - the first non-comment line from the file is the schema line (comma separated field_name:ES_data_type elements) + * - sub-fields should be placed after the root field using a dot notation for the name: + * root_field:long,root_field.sub_field:integer + * - a special _id field can be used in the schema and the values of this field will be used in the bulk request as actual doc ids + * - all subsequent non-comment lines represent the values that will be used to build the _bulk request + * - an empty string "" refers to a null value + * - a value starting with an opening square bracket "[" and ending with a closing square bracket "]" refers to a multi-value field + * - multi-values are comma separated + * - commas inside multivalue fields can be escaped with \ (backslash) character + */ private static void loadCsvData( RestClient client, String indexName, @@ -278,17 +315,27 @@ private static void loadCsvData( if (i > 0 && row.length() > 0) { row.append(","); } - if (entries[i].contains(",")) {// multi-value + // split on comma ignoring escaped commas + String[] multiValues = entries[i].split(COMMA_ESCAPING_REGEX); + if (multiValues.length > 0) {// multi-value StringBuilder rowStringValue = new StringBuilder("["); - for (String s : delimitedListToStringArray(entries[i], ",")) { - rowStringValue.append("\"" + s + "\","); + for (String s : multiValues) { + if (entries[i].startsWith("\"") == false || entries[i].endsWith("\"") == false) { + rowStringValue.append("\"" + s + "\","); + } else { + rowStringValue.append(s + ","); + } } // remove the last comma and put a closing bracket instead rowStringValue.replace(rowStringValue.length() - 1, rowStringValue.length(), "]"); entries[i] = rowStringValue.toString(); } else { - entries[i] = "\"" + entries[i] + "\""; + if (entries[i].startsWith("\"") == false || entries[i].endsWith("\"") == false) { + entries[i] = "\"" + entries[i] + "\""; + } } + // replace any escaped commas with single comma + entries[i] = entries[i].replace(ESCAPED_COMMA_SEQUENCE, ","); row.append("\"" + columns[i] + "\":" + entries[i]); } catch (Exception e) { throw new IllegalArgumentException( @@ -356,4 +403,8 @@ private static XContentParser createParser(XContent xContent, InputStream data) public record TestsDataset(String indexName, String mappingFileName, String dataFileName) {} public record EnrichConfig(String policyName, String policyFileName) {} + + private interface IndexCreator { + void createIndex(RestClient client, String indexName, String mapping) throws IOException; + } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java index 8edcdd9edb124..9c8d5f420d53b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java @@ -145,7 +145,7 @@ public static Map loadMapping(String name) { } public static EnrichResolution emptyPolicyResolution() { - return new EnrichResolution(Set.of(), Set.of()); + return new EnrichResolution(); } public static SearchStats statsForMissingField(String... 
names) { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox.csv new file mode 100644 index 0000000000000..f8701f386e73b --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox.csv @@ -0,0 +1,249 @@ +id:keyword,name:keyword,shape:geo_shape +FLK,Falkland Is.,"BBOX(-61.148055\, -57.733200\, -51.249455\, -52.343055)" +GUF,French Guiana,"BBOX(-54.603782\, -51.648055\, 5.755418\, 2.113473)" +GUY,Guyana,"BBOX(-61.389727\, -56.470636\, 8.535273\, 1.186873)" +PCN,Pitcairn Is.,"BBOX(-130.105055\, -128.286118\, -24.325836\, -25.082227)" +SGS,South Georgia & the South Sandwich Is.,"BBOX(-38.023755\, -26.241391\, -53.989727\, -58.498609)" +SHN,St. Helena,"BBOX(-5.792782\, -5.645282\, -15.903755\, -16.021946)" +SUR,Suriname,"BBOX(-58.071400\, -53.986118\, 6.001809\, 1.836245)" +TTO,Trinidad & Tobago,"BBOX(-61.921600\, -60.520836\, 11.345554\, 10.040345)" +VEN,Venezuela,"BBOX(-73.378064\, -59.803055\, 12.197500\, 0.649164)" +ASM,American Samoa,"BBOX(-170.823227\, -170.561873\, -14.254309\, -14.375555)" +COK,Cook Is.,"BBOX(-165.848345\, -157.703764\, -10.881318\, -21.940836)" +PYF,French Polynesia,"BBOX(-151.497773\, -138.809755\, -8.778191\, -17.870836)" +UMI,Jarvis I.,"BBOX(-160.045164\, -160.009464\, -0.374309\, -0.398055)" +NIU,Niue,"BBOX(-169.952236\, -169.781555\, -18.963336\, -19.145555)" +WSM,Samoa,"BBOX(-172.780027\, -171.429200\, -13.460555\, -14.057500)" +TKL,Tokelau,"BBOX(-171.862718\, -171.843764\, -9.170627\, -9.218891)" +TON,Tonga,"BBOX(-175.360000\, -173.906827\, -18.568055\, -21.268064)" +WLF,Wallis & Futuna,"BBOX(-178.190273\, -176.121936\, -13.214864\, -14.323891)" +ARG,Argentina,"BBOX(-73.582300\, -53.650009\, -21.780518\, -55.051673)" +BOL,Bolivia,"BBOX(-69.656191\, -57.521118\, -9.679191\, -22.901109)" +BRA,Brazil,"BBOX(-74.004591\, -34.792918\, 5.272709\, -33.741118)" +CHL,Chile,"BBOX(-109.446109\, -66.420627\, -17.505282\, -55.902227)" +ECU,Ecuador,"BBOX(-91.663891\, -75.216846\, 1.437782\, -5.000309)" +PRY,Paraguay,"BBOX(-62.643773\, -54.243900\, -19.296809\, -27.584727)" +PER,Peru,"BBOX(-81.355146\, -68.673909\, -0.036873\, -18.348546)" +URY,Uruguay,"BBOX(-58.438609\, -53.098300\, -30.096673\, -34.943818)" +UMI,Baker I.,"BBOX(-176.467655\, -176.455855\, 0.222573\, 0.215282)" +CAN,Canada,"BBOX(-141.002991\, -52.617364\, 83.113873\, 41.675554)" +GTM,Guatemala,"BBOX(-92.246782\, -88.214736\, 17.821109\, 13.745836)" +UMI,Howland I.,"BBOX(-176.643082\, -176.631091\, 0.808609\, 0.790282)" +UMI,Johnston Atoll,"BBOX(-169.538936\, -169.523927\, 16.730273\, 16.724164)" +MEX,Mexico,"BBOX(-118.404164\, -86.738618\, 32.718454\, 14.550545)" +UMI,Midway Is.,"BBOX(-177.395845\, -177.360545\, 28.221518\, 28.184154)" +BRB,Barbados,"BBOX(-59.659446\, -59.427082\, 13.337082\, 13.050554)" +DMA,Dominica,"BBOX(-61.491391\, -61.250700\, 15.631945\, 15.198054)" +GRD,Grenada,"BBOX(-61.785182\, -61.596391\, 12.237154\, 11.996945)" +GLP,Guadeloupe,"BBOX(-61.796109\, -61.187082\, 16.512918\, 15.870000)" +MTQ,Martinique,"BBOX(-61.231536\, -60.816946\, 14.880136\, 14.402773)" +LCA,St. Lucia,"BBOX(-61.079582\, -60.878064\, 14.109309\, 13.709445)" +SPM,St. Pierre & Miquelon,"BBOX(-56.397782\, -56.145500\, 47.135827\, 46.747191)" +VCT,St. 
Vincent & the Grenadines,"BBOX(-61.280146\, -61.120282\, 13.383191\, 13.130282)" +ABW,Aruba,"BBOX(-70.059664\, -69.874864\, 12.627773\, 12.411109)" +BMU,Bermuda,"BBOX(-64.823064\, -64.676809\, 32.379509\, 32.260554)" +DOM,Dominican Republic,"BBOX(-72.003064\, -68.322927\, 19.930827\, 17.604164)" +HTI,Haiti,"BBOX(-74.467791\, -71.629182\, 20.091454\, 18.022782)" +JAM,Jamaica,"BBOX(-78.373900\, -76.221118\, 18.522500\, 17.697218)" +ANT,Netherlands Antilles,"BBOX(-69.163618\, -68.192927\, 12.383891\, 12.020554)" +BHS,The Bahamas,"BBOX(-78.978900\, -72.738891\, 26.929164\, 20.915273)" +TCA,Turks & Caicos Is.,"BBOX(-72.031464\, -71.127573\, 21.957773\, 21.429918)" +BLZ,Belize,"BBOX(-89.216400\, -87.779591\, 18.489900\, 15.889854)" +CYM,Cayman Is.,"BBOX(-81.400836\, -81.093064\, 19.354164\, 19.265000)" +COL,Colombia,"BBOX(-81.720146\, -66.870455\, 12.590273\, -4.236873)" +CRI,Costa Rica,"BBOX(-85.911391\, -82.561400\, 11.212845\, 8.025673)" +CUB,Cuba,"BBOX(-84.952927\, -74.131255\, 23.194027\, 19.821945)" +SLV,El Salvador,"BBOX(-90.108064\, -87.694673\, 14.431982\, 13.156391)" +HND,Honduras,"BBOX(-89.350491\, -83.131855\, 16.435827\, 12.985173)" +NIC,Nicaragua,"BBOX(-87.689827\, -83.131855\, 15.022218\, 10.709691)" +PAN,Panama,"BBOX(-83.030291\, -77.198336\, 9.620136\, 7.206109)" +AIA,Anguilla,"BBOX(-63.167782\, -62.972709\, 18.272982\, 18.164445)" +ATG,Antigua & Barbuda,"BBOX(-61.891109\, -61.666946\, 17.724300\, 16.989718)" +VGB,British Virgin Is.,"BBOX(-64.698482\, -64.324527\, 18.504854\, 18.383891)" +MSR,Montserrat,"BBOX(-62.236946\, -62.138891\, 16.812354\, 16.671391)" +PRI,Puerto Rico,"BBOX(-67.266400\, -65.301118\, 18.519445\, 17.922218)" +KNA,St. Kitts & Nevis,"BBOX(-62.862782\, -62.622509\, 17.410136\, 17.208882)" +VIR,Virgin Is.,"BBOX(-65.023509\, -64.562573\, 18.387673\, 17.676664)" +FRO,Faroe Is.,"BBOX(-7.433473\, -6.389718\, 62.357500\, 61.388327)" +GRL,Greenland,"BBOX(-73.053609\, -12.157637\, 83.623600\, 59.790273)" +XGK,Guernsey,"BBOX(-2.668609\, -2.500973\, 49.508191\, 49.422491)" +ISL,Iceland,"BBOX(-24.538400\, -13.499446\, 66.536100\, 63.390000)" +IRL,Ireland,"BBOX(-10.474727\, -6.013055\, 55.379991\, 51.445545)" +XIM,Isle of Man,"BBOX(-4.787155\, -4.308682\, 54.416382\, 54.055545)" +SJM,Jan Mayen,"BBOX(-9.119909\, -7.928509\, 71.180818\, 70.803863)" +XJE,Jersey,"BBOX(-2.247364\, -2.015000\, 49.261109\, 49.167773)" +GBR,United Kingdom,"BBOX(-8.171664\, 1.749445\, 60.843327\, 49.955273)" +CPV,Cape Verde,"BBOX(-25.360555\, -22.666109\, 17.192364\, 14.811109)" +CIV,Cote d'Ivoire,"BBOX(-8.606382\, -2.487782\, 10.735254\, 4.344718)" +GHA,Ghana,"BBOX(-3.248891\, 1.202782\, 11.155691\, 4.727082)" +GIB,Gibraltar,"BBOX(-5.356173\, -5.334509\, 36.163309\, 36.112073)" +LBR,Liberia,"BBOX(-11.492327\, -7.368400\, 8.512782\, 4.343609)" +MAR,Morocco,"BBOX(-13.174964\, -1.011809\, 35.919164\, 27.664236)" +PRT,Portugal,"BBOX(-31.289027\, -6.190455\, 42.150673\, 32.637500)" +ESP,Spain,"BBOX(-18.169864\, 4.316945\, 43.764300\, 27.637500)" +ESH,Western Sahara,"BBOX(-17.101527\, -8.666391\, 27.666954\, 20.764100)" +BFA,Burkina Faso,"BBOX(-5.520837\, 2.397927\, 15.082773\, 9.395691)" +GIN,Guinea,"BBOX(-15.080837\, -7.653373\, 12.677500\, 7.193927)" +GNB,Guinea-Bissau,"BBOX(-16.717773\, -13.643891\, 12.684718\, 10.925100)" +MLI,Mali,"BBOX(-12.244837\, 4.251391\, 25.000273\, 10.142154)" +MRT,Mauritania,"BBOX(-17.075555\, -4.806109\, 27.290454\, 14.725636)" +SEN,Senegal,"BBOX(-17.532782\, -11.369927\, 16.690618\, 12.301745)" +SLE,Sierra Leone,"BBOX(-13.295609\, -10.264309\, 9.997500\, 6.923609)" 
+GMB,The Gambia,"BBOX(-16.821664\, -13.798609\, 13.826391\, 13.059973)" +DJI,Djibouti,"BBOX(41.759854\, 43.420409\, 12.708327\, 10.942218)" +ERI,Eritrea,"BBOX(36.443282\, 43.121382\, 17.994882\, 12.363891)" +ETH,Ethiopia,"BBOX(32.991800\, 47.988245\, 14.883609\, 3.406664)" +MNG,Mongolia,"BBOX(87.761100\, 119.931509\, 52.142773\, 41.586654)" +SDN,Sudan,"BBOX(21.829100\, 38.607500\, 22.232218\, 3.493391)" +UGA,Uganda,"BBOX(29.574300\, 35.009718\, 4.222782\, -1.476109)" +ISR,Gaza Strip,"BBOX(34.216663\, 34.558891\, 31.596100\, 31.216545)" +IRQ,Iraq,"BBOX(38.794700\, 48.560691\, 37.383673\, 29.061664)" +ISR,Israel,"BBOX(34.267582\, 35.681109\, 33.270273\, 29.486709)" +JOR,Jordan,"BBOX(34.960418\, 39.301109\, 33.377591\, 29.188891)" +KAZ,Kazakhstan,"BBOX(46.499163\, 87.348209\, 55.442627\, 40.594436)" +NOR,Norway,"BBOX(4.789582\, 31.073536\, 71.154709\, 57.987918)" +RUS,Russia,"BBOX(-180.000000\, 180.000000\, 81.851927\, 41.196582)" +SWE,Sweden,"BBOX(11.113336\, 24.167009\, 69.060300\, 55.339164)" +ISR,West Bank,"BBOX(34.888191\, 35.570609\, 32.546391\, 31.350691)" +DZA,Algeria,"BBOX(-8.667218\, 11.986473\, 37.089854\, 18.976391)" +AND,Andorra,"BBOX(1.421391\, 1.781718\, 42.655964\, 42.436382)" +CMR,Cameroon,"BBOX(8.502363\, 16.207000\, 13.085000\, 1.654164)" +CAF,Central African Republic,"BBOX(14.418891\, 27.459718\, 11.000836\, 2.221264)" +LBY,Libya,"BBOX(9.311391\, 25.151663\, 33.171136\, 19.499064)" +MCO,Monaco,"BBOX(7.390900\, 7.439291\, 43.768300\, 43.727545)" +TUN,Tunisia,"BBOX(7.492218\, 11.581663\, 37.340409\, 30.234391)" +BEN,Benin,"BBOX(0.776663\, 3.855000\, 12.396654\, 6.218718)" +TCD,Chad,"BBOX(13.461945\, 24.002745\, 23.450554\, 7.458536)" +GNQ,Equatorial Guinea,"BBOX(8.424163\, 11.353891\, 3.763336\, 0.930154)" +KIR,Kiribati,"BBOX(-157.581700\, 172.947509\, 2.033054\, 1.335991)" +NER,Niger,"BBOX(0.166663\, 15.996663\, 23.522309\, 11.693273)" +NGA,Nigeria,"BBOX(2.692500\, 14.649654\, 13.891500\, 4.272845)" +STP,Sao Tome & Principe,"BBOX(6.465136\, 7.463473\, 1.701245\, 0.018336)" +TGO,Togo,"BBOX(-0.149764\, 1.797800\, 11.138536\, 6.100545)" +ALB,Albania,"BBOX(19.288536\, 21.053327\, 42.660345\, 39.645000)" +BIH,Bosnia & Herzegovina,"BBOX(15.740591\, 19.619782\, 45.265945\, 42.565827)" +HRV,Croatia,"BBOX(13.504791\, 19.425000\, 46.535827\, 42.399991)" +ITA,Italy,"BBOX(6.623963\, 18.514445\, 47.094582\, 36.649164)" +MKD,Macedonia,"BBOX(20.458818\, 23.030973\, 42.358954\, 40.855891)" +MLT,Malta,"BBOX(14.329100\, 14.570000\, 35.991936\, 35.800000)" +SMR,San Marino,"BBOX(12.406945\, 12.511109\, 43.986873\, 43.898682)" +SMN,Serbia & Montenegro,"BBOX(18.453327\, 23.005000\, 46.181109\, 41.849000)" +VTC,Vatican City,"BBOX(12.444473\, 12.457718\, 41.908391\, 41.900891)" +BGR,Bulgaria,"BBOX(22.365273\, 28.605136\, 44.224718\, 41.243045)" +CYP,Cyprus,"BBOX(32.269863\, 34.586036\, 35.688609\, 34.640273)" +EGY,Egypt,"BBOX(24.706800\, 36.895827\, 31.646945\, 21.994164)" +GEO,Georgia,"BBOX(40.002963\, 46.710818\, 43.584718\, 41.048045)" +GRC,Greece,"BBOX(19.640000\, 28.238045\, 41.747773\, 34.930545)" +LBN,Lebanon,"BBOX(35.100827\, 36.623745\, 34.647500\, 33.062082)" +SYR,Syria,"BBOX(35.614463\, 42.378327\, 37.290545\, 32.313609)" +TUR,Turkey,"BBOX(25.665827\, 44.820545\, 42.109991\, 35.818445)" +AUT,Austria,"BBOX(9.533573\, 17.166382\, 49.018745\, 46.407491)" +CZE,Czech Republic,"BBOX(12.093700\, 18.852218\, 51.052491\, 48.581382)" +DNK,Denmark,"BBOX(8.092918\, 15.149163\, 57.745973\, 54.561936)" +HUN,Hungary,"BBOX(16.111800\, 22.894800\, 48.576173\, 45.748327)" +POL,Poland,"BBOX(14.147636\, 
24.143473\, 54.836036\, 49.002918)" +SVK,Slovakia,"BBOX(16.844718\, 22.558054\, 49.600827\, 47.737500)" +SVN,Slovenia,"BBOX(13.383473\, 16.607873\, 46.876245\, 45.425818)" +SJM,Svalbard,"BBOX(10.487918\, 33.637500\, 80.764163\, 74.343045)" +BEL,Belgium,"BBOX(2.541663\, 6.398200\, 51.501245\, 49.508882)" +FRA,France,"BBOX(-4.790282\, 9.562218\, 51.091109\, 41.364927)" +DEU,Germany,"BBOX(5.865000\, 15.033818\, 55.056527\, 47.274718)" +LIE,Liechtenstein,"BBOX(9.474636\, 9.633891\, 47.274545\, 47.057454)" +LUX,Luxembourg,"BBOX(5.734445\, 6.524027\, 50.181809\, 49.448464)" +NLD,Netherlands,"BBOX(3.370863\, 7.210973\, 53.465827\, 50.753882)" +CHE,Switzerland,"BBOX(5.967009\, 10.488209\, 47.806664\, 45.829436)" +USA,United States,"BBOX(-178.216555\, 179.775936\, 71.351436\, 18.925482)" +BLR,Belarus,"BBOX(23.165400\, 32.740054\, 56.167491\, 51.251845)" +EST,Estonia,"BBOX(21.837354\, 28.194091\, 59.664718\, 57.522636)" +FIN,Finland,"BBOX(19.511391\, 31.581963\, 70.088609\, 59.806800)" +LVA,Latvia,"BBOX(20.968609\, 28.235963\, 58.083254\, 55.674836)" +LTU,Lithuania,"BBOX(20.942836\, 26.813054\, 56.449854\, 53.890336)" +MDA,Moldova,"BBOX(26.634991\, 30.128709\, 48.468318\, 45.448645)" +ROM,Romania,"BBOX(20.261027\, 29.672218\, 48.263882\, 43.623309)" +UKR,Ukraine,"BBOX(22.151445\, 40.178745\, 52.378600\, 44.379154)" +IND,India,"BBOX(68.144227\, 97.380536\, 35.505618\, 6.745827)" +MDV,Maldives,"BBOX(72.863391\, 73.637272\, 7.027773\, -0.641664)" +OMN,Oman,"BBOX(51.999291\, 59.847082\, 26.368709\, 16.642782)" +SOM,Somalia,"BBOX(40.988609\, 51.411318\, 11.979164\, -1.674873)" +LKA,Sri Lanka,"BBOX(79.696091\, 81.891663\, 9.828191\, 5.918054)" +TKM,Turkmenistan,"BBOX(51.250182\, 66.670882\, 42.796173\, 35.145991)" +UZB,Uzbekistan,"BBOX(55.997491\, 73.167545\, 45.570591\, 37.184991)" +YEM,Yemen,"BBOX(42.555973\, 54.473473\, 18.999345\, 12.144718)" +ARM,Armenia,"BBOX(43.454163\, 46.620536\, 41.297054\, 38.841145)" +AZE,Azerbaijan,"BBOX(44.778863\, 51.677009\, 42.710754\, 38.262809)" +BHR,Bahrain,"BBOX(50.453327\, 50.796391\, 26.288891\, 25.571945)" +IRN,Iran,"BBOX(44.034954\, 63.330273\, 39.779154\, 25.075973)" +KWT,Kuwait,"BBOX(46.546945\, 48.416591\, 30.084164\, 28.538882)" +QAT,Qatar,"BBOX(50.751936\, 51.615827\, 26.152500\, 24.556045)" +SAU,Saudi Arabia,"BBOX(34.572145\, 55.666109\, 32.154945\, 16.377500)" +ARE,United Arab Emirates,"BBOX(51.583327\, 56.381663\, 26.083882\, 22.633327)" +AFG,Afghanistan,"BBOX(60.504163\, 74.915736\, 38.471982\, 29.406109)" +KGZ,Kyrgyzstan,"BBOX(69.249500\, 80.281582\, 43.216900\, 39.195473)" +NPL,Nepal,"BBOX(80.052200\, 88.194554\, 30.424718\, 26.368364)" +PAK,Pakistan,"BBOX(60.866300\, 77.823927\, 37.060791\, 23.688045)" +TJK,Tajikistan,"BBOX(67.364700\, 75.187482\, 41.049254\, 36.671845)" +BGD,Bangladesh,"BBOX(88.043872\, 92.669345\, 26.626136\, 20.744818)" +BTN,Bhutan,"BBOX(88.751936\, 92.114218\, 28.325000\, 26.703609)" +BRN,Brunei,"BBOX(114.095082\, 115.360263\, 5.053054\, 4.018191)" +CHN,China,"BBOX(73.620045\, 134.768463\, 53.553745\, 18.168882)" +JPN,Japan,"BBOX(123.678863\, 145.812409\, 45.486382\, 24.251391)" +PRK,North Korea,"BBOX(124.323954\, 130.697418\, 43.006100\, 37.671382)" +PLW,Palau,"BBOX(134.452482\, 134.658872\, 7.729445\, 7.305254)" +PHL,Philippines,"BBOX(116.950000\, 126.598036\, 19.391109\, 5.049164)" +KOR,South Korea,"BBOX(126.099018\, 129.586872\, 38.625245\, 33.192209)" +KHM,Cambodia,"BBOX(102.346509\, 107.636382\, 14.708618\, 10.422736)" +LAO,Laos,"BBOX(100.091372\, 107.695254\, 22.499927\, 13.926664)" +MYS,Malaysia,"BBOX(99.641936\, 
119.275818\, 7.352918\, 0.852782)" +MMR,Myanmar,"BBOX(92.204991\, 101.169427\, 28.546527\, 9.839582)" +SGP,Singapore,"BBOX(103.640945\, 103.997945\, 1.445282\, 1.259027)" +THA,Thailand,"BBOX(97.347272\, 105.639291\, 20.454582\, 5.633473)" +VNM,Vietnam,"BBOX(102.140745\, 109.464845\, 23.324164\, 8.559236)" +GUM,Guam,"BBOX(144.634154\, 144.953309\, 13.652291\, 13.235000)" +MHL,Marshall Is.,"BBOX(162.324963\, 171.378063\, 14.594027\, 5.600273)" +FSM,Micronesia,"BBOX(158.120100\, 163.042891\, 6.977636\, 5.261664)" +MNP,Northern Mariana Is.,"BBOX(145.572682\, 145.818082\, 15.268191\, 14.908054)" +UMI,Wake I.,"BBOX(166.608981\, 166.662200\, 19.324582\, 19.279445)" +BWA,Botswana,"BBOX(19.996109\, 29.373618\, -17.782082\, -26.875555)" +BDI,Burundi,"BBOX(28.985000\, 30.853191\, -2.301564\, -4.448055)" +ATF,French Southern & Antarctic Lands,"BBOX(51.650836\, 70.567491\, -46.327645\, -49.725009)" +HMD,Heard I. & McDonald Is.,"BBOX(73.234709\, 73.773882\, -52.965145\, -53.199445)" +KEN,Kenya,"BBOX(33.907218\, 41.905163\, 4.622500\, -4.669618)" +RWA,Rwanda,"BBOX(28.854445\, 30.893263\, -1.054446\, -2.825491)" +TZA,Tanzania,"BBOX(29.340827\, 40.436809\, -0.997218\, -11.740418)" +ZMB,Zambia,"BBOX(21.996391\, 33.702282\, -8.191664\, -18.074918)" +ZWE,Zimbabwe,"BBOX(25.237918\, 33.071591\, -15.616527\, -22.414764)" +ATA,Antarctica,"BBOX(-180.000000\, 180.000000\, -60.503336\, -90.000000)" +NOR,Bouvet I.,"BBOX(3.342363\, 3.484163\, -54.383609\, -54.462782)" +COM,Comoros,"BBOX(43.214027\, 44.530418\, -11.366946\, -12.383055)" +REU,Juan De Nova I.,"BBOX(42.723818\, 42.760900\, -17.052018\, -17.076118)" +LSO,Lesotho,"BBOX(27.013973\, 29.455554\, -28.570691\, -30.650527)" +MWI,Malawi,"BBOX(32.681873\, 35.920963\, -9.376673\, -17.135282)" +MOZ,Mozambique,"BBOX(30.213018\, 40.846109\, -10.471109\, -26.860282)" +ZAF,South Africa,"BBOX(16.483327\, 37.892218\, -22.136391\, -46.969727)" +SWZ,Swaziland,"BBOX(30.798336\, 32.133400\, -25.728336\, -27.316391)" +AGO,Angola,"BBOX(11.731245\, 24.084445\, -4.388991\, -18.016391)" +COG,Congo,"BBOX(11.140663\, 18.643609\, 3.711109\, -5.015000)" +ZAR,Congo\, DRC,"BBOX(12.214554\, 31.302773\, 5.380691\, -13.458055)" +FJI,Fiji,"BBOX(-180.000000\, 180.000000\, -16.153473\, -19.162782)" +GAB,Gabon,"BBOX(8.700836\, 14.519582\, 2.317900\, -3.925282)" +NAM,Namibia,"BBOX(11.716391\, 25.264427\, -16.954173\, -28.961873)" +NZL,New Zealand,"BBOX(-176.848755\, 178.841063\, -34.414718\, -52.578055)" +IOT,British Indian Ocean Territory,"BBOX(72.357900\, 72.494282\, -7.233473\, -7.436246)" +REU,Glorioso Is.,"BBOX(47.279091\, 47.303054\, -11.554100\, -11.577782)" +MDG,Madagascar,"BBOX(43.236827\, 50.501391\, -11.945555\, -25.588336)" +MUS,Mauritius,"BBOX(57.306309\, 63.495754\, -19.673336\, -20.520555)" +MYT,Mayotte,"BBOX(45.039163\, 45.293345\, -12.662500\, -12.992500)" +REU,Reunion,"BBOX(55.220554\, 55.853054\, -20.856527\, -21.373891)" +SYC,Seychelles,"BBOX(46.205691\, 55.540554\, -4.551664\, -9.463055)" +CXR,Christmas I.,"BBOX(105.629000\, 105.751900\, -10.384082\, -10.510973)" +CCK,Cocos Is.,"BBOX(96.817491\, 96.864845\, -12.130418\, -12.199446)" +IDN,Indonesia,"BBOX(95.210945\, 141.007018\, 5.913473\, -10.929655)" +TLS,Timor Leste,"BBOX(124.046100\, 127.308591\, -8.140000\, -9.463627)" +AUS,Australia,"BBOX(112.907209\, 158.960372\, -10.135691\, -54.753891)" +NRU,Nauru,"BBOX(166.904418\, 166.957045\, -0.493336\, -0.552218)" +NCL,New Caledonia,"BBOX(163.982745\, 168.130509\, -20.087918\, -22.673891)" +NFK,Norfolk I.,"BBOX(167.910945\, 167.998872\, -29.000555\, -29.081109)" +PNG,Papua New 
Guinea,"BBOX(140.858854\, 155.966845\, -1.355282\, -11.642500)" +SLB,Solomon Is.,"BBOX(155.671300\, 166.931836\, -6.605518\, -11.845836)" +TUV,Tuvalu,"BBOX(176.295254\, 179.232281\, -6.089446\, -8.561291)" +VUT,Vanuatu,"BBOX(166.521636\, 169.893863\, -13.707218\, -20.254173)" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox_web.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox_web.csv new file mode 100644 index 0000000000000..aa540d40ad604 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox_web.csv @@ -0,0 +1,249 @@ +id:keyword,name:keyword,shape:cartesian_shape +FLK, Falkland Is., "BBOX(-6806970.344651548\, -6426830.424971599\, -6665538.61144021\, -6862393.473674134)" +GUF, French Guiana, "BBOX(-6078465.2067807885\, -5749435.182262659\, 641770.3972926841\, 235324.11002137093)" +GUY, Guyana, "BBOX(-6833873.148626795\, -6286282.4434172455\, 953676.0708782381\, 132131.5480264357)" +PCN, Pitcairn Is., "BBOX(-1.448322847021477E7\, -1.428074532961791E7\, -2793163.209148463\, -2885847.742584221)" +SGS, South Georgia & the South Sandwich Is., "BBOX(-4232785.044058981\, -2921178.2834205604\, -7168210.949791082\, -8072797.261021951)" +SHN, St. Helena, "BBOX(-644849.5424266771\, -628429.9175369549\, -1793579.7338931332\, -1807264.3754193506)" +SUR, Suriname, "BBOX(-6464478.676752644\, -6009707.164829022\, 669343.5434865113\, 204444.85915446977)" +TTO, Trinidad & Tobago, "BBOX(-6893080.980145244\, -6737148.644965401\, 1271316.8958092\, 1123450.7268402777)" +VEN, Venezuela, "BBOX(-8168408.718739186\, -6657245.629555437\, 1368193.4618250781\, 72266.15206230174)" +ASM, American Samoa, "BBOX(-1.901595464265674E7\, -1.8986860848464E7\, -1603409.0061145446\, -1617338.5456514952)" +COK, Cook Is., "BBOX(-1.8462153311737206E7\, -1.755550270221884E7\, -1218650.60324631\, -2504423.3700605934)" +PYF, French Polynesia, "BBOX(-1.686465494432737E7\, -1.5452231241588091E7\, -981029.2116948966\, -2022435.6471389162)" +UMI, Jarvis I., "BBOX(-1.781614615792593E7\, -1.7812172052105166E7\, -41668.183668037316\, -44311.636360225275)" +NIU, Niue, "BBOX(-1.8918996368064713E7\, -1.889999624605927E7\, -2150619.798091522\, -2172080.175292089)" +WSM, Samoa, "BBOX(-1.9233784622210693E7\, -1.9083411248441823E7\, -1512399.049561015\, -1580814.199108954)" +TKL, Tokelau, "BBOX(-1.913167025144482E7\, -1.912956030181662E7\, -1025256.50252298\, -1030699.159919998)" +TON, Tonga, "BBOX(-1.9520985902791113E7\, -1.9359219424419094E7\, -2104146.80131666\, -2423871.209298853)" +WLF, Wallis & Futuna, "BBOX(-1.98360504519132E7\, -1.9605804230316367E7\, -1484290.6690231054\, -1611402.1249494848)" +ARG, Argentina, "BBOX(-8191144.166257678\, -5972291.682103194\, -2485194.106818803\, -7371901.253043402)" +BOL, Bolivia, "BBOX(-7754091.711639628\, -6403221.564728467\, -1082644.4605265881\, -2620063.8163838163)" +BRA, Brazil, "BBOX(-8238153.385337716\, -3873129.9144329783\, 587785.5079629741\, -3994093.243498929)" +CHL, Chile, "BBOX(-1.2183485121489162E7\, -7393910.374780716\, -1979723.0325789037\, -7538976.386388264)" +ECU, Ecuador, "BBOX(-1.0203977668829728E7\, -8373100.994630531\, 160069.96058917182\, -557339.7863215066)" +PRY, Paraguay, "BBOX(-6973472.910758704\, -6038403.325800699\, -2189911.7242244524\, -3196717.5348766074)" +PER, Peru, "BBOX(-9056413.424871765\, -7644744.579599449\, -4104.683866786337\, -2078385.864447083)" +URY, Uruguay, "BBOX(-6505356.195641661\, -5910875.717165678\, -3515982.318158614\, -4156248.8527274607)" +UMI, Baker I., 
"BBOX(-1.964428949334857E7\, -1.9642975923357394E7\, 24776.775336047573\, 23965.139003268785)" +CAN, Canada, "BBOX(-1.5696381156263582E7\, -5857338.166548977\, 1.7926778413967136E7\, 5112502.227274475)" +GTM, Guatemala, "BBOX(-1.0268864798128676E7\, -9820019.490616102\, 2016620.2477192462\, 1545072.9951440636)" +UMI, Howland I., "BBOX(-1.966381793765724E7\, -1.9662483105643325E7\, 90016.93033465231\, 87976.57940884378)" +UMI, Johnston Atoll, "BBOX(-1.8872988022526257E7\, -1.8871317228289172E7\, 1889449.6904405674\, 1888739.592498257)" +MEX, Mexico, "BBOX(-1.3180691242448486E7\, -9655698.786528189\, 3857992.7910224693\, 1637455.8925958527)" +UMI, Midway Is., "BBOX(-1.9747615131493594E7\, -1.974368555346914E7\, 3276930.956339718\, 3272211.297114333)" +BRB, Barbados, "BBOX(-6641259.148804331\, -6615392.506649243\, 1498269.4980028346\, 1465508.5364990495)" +DMA, Dominica, "BBOX(-6845190.333337227\, -6818396.733782433\, 1762138.8493679555\, 1712035.77580254)" +GRD, Grenada, "BBOX(-6877894.997852321\, -6856878.879868893\, 1372710.0161931934\, 1345360.731534649)" +GLP, Guadeloupe, "BBOX(-6879111.38592805\, -6811314.810418132\, 1864198.7087877272\, 1789672.9198651556)" +MTQ, Martinique, "BBOX(-6816263.407061167\, -6770111.459379609\, 1675390.1030315096\, 1620466.564996925)" +LCA, St. Lucia, "BBOX(-6799347.965159521\, -6776915.084016965\, 1586760.2747788534\, 1540902.846138527)" +SPM, St. Pierre & Miquelon, "BBOX(-6278172.373236121\, -6250088.469463722\, 5964272.744483719\, 5900906.394026551)" +VCT, St. Vincent & the Grenadines, "BBOX(-6821674.647507875\, -6803878.668434177\, 1503545.1028787405\, 1474620.605161206)" +ABW, Aruba, "BBOX(-7799006.120542209\, -7778434.278646477\, 1417237.7724451458\, 1392531.3743975367)" +BMU, Bermuda, "BBOX(-7216070.475135298\, -7199789.443011595\, 3813230.825275473\, 3797561.1925476543)" +DOM, Dominican Republic, "BBOX(-8015344.418919742\, -7605673.442087284\, 2264838.2331280783\, 1991268.1942175906)" +HTI, Haiti, "BBOX(-8289716.573465983\, -7973724.065068766\, 2283868.061303094\, 2040215.3097965734)" +JAM, Jamaica, "BBOX(-8724542.638268478\, -8484896.042272912\, 2098797.886578782\, 2002138.6713165536)" +ANT, Netherlands Antilles, "BBOX(-7699258.7361087445\, -7591201.908286172\, 1389429.1415046235\, 1348047.674912462)" +BHS, The Bahamas, "BBOX(-8791890.930189032\, -8097256.305860282\, 3114624.5106054945\, 2381778.6607825435)" +TCA, Turks & Caicos Is., "BBOX(-8018505.892457832\, -7917885.206619215\, 2506456.133236025\, 2443216.1674464582)" +BLZ, Belize, "BBOX(-9931524.217026532\, -9771579.370801603\, 2094970.9791089285\, 1791970.7485571986)" +CYM, Cayman Is., "BBOX(-9061499.6124054\, -9027238.590089742\, 2196677.690165189\, 2186160.351965059)" +COL, Colombia, "BBOX(-9097045.039005652\, -7443984.998678304\, 1412960.1248500098\, -472076.97756910085)" +CRI, Costa Rica, "BBOX(-9563612.298130559\, -9190693.005900422\, 1256252.842749445\, 896349.8334170822)" +CUB, Cuba, "BBOX(-9456916.57372173\, -8252253.557317591\, 2655499.846135876\, 2251949.753820664)" +SLV, El Salvador, "BBOX(-1.0030783799451409E7\, -9762126.342283737\, 1623823.8238794443\, 1477605.2302434247)" +HND, Honduras, "BBOX(-9946451.158864416\, -9254195.76601206\, 1855249.5859095547\, 1458038.3723417278)" +NIC, Nicaragua, "BBOX(-9761586.888031427\, -9254195.76601206\, 1691760.81737009\, 1199200.9443015517)" +PAN, Panama, "BBOX(-9242889.713250706\, -8593679.45241179\, 1075976.1383535631\, 804303.6245583462)" +AIA, Anguilla, "BBOX(-7031805.325801677\, -7010089.898777183\, 2069525.485454939\, 2056805.549131826)" 
+ATG, Antigua & Barbuda, "BBOX(-6889686.737551939\, -6864733.02654072\, 2005303.4210994085\, 1919628.1877410556)" +VGB, British Virgin Is., "BBOX(-7202202.070335221\, -7160573.590161418\, 2096726.335695059\, 2082531.6290789556)" +MSR, Montserrat, "BBOX(-6928185.136284053\, -6917269.703615838\, 1898992.8327456792\, 1882606.3105989075)" +PRI, Puerto Rico, "BBOX(-7488061.394454311\, -7269287.202979579\, 2098439.2297828426\, 2028446.302847273)" +KNA, St. Kitts & Nevis, "BBOX(-6997852.881114455\, -6971105.813106805\, 1968620.0064461157\, 1945153.7466145495)" +VIR, Virgin Is., "BBOX(-7238383.9104642505\, -7187072.749663104\, 2082975.2861753216\, 1999737.0895242055)" +FRO, Faroe Is., "BBOX(-827490.42907036\, -711300.1539736006\, 8944413.838654397\, 8715539.142798016)" +GRL, Greenland, "BBOX(-8132290.553358883\, -1353381.9599010698\, 1.841838614386466E7\, 8353191.775986784)" +XGK, Guernsey, "BBOX(-297068.19496499473\, -278407.0408089712\, 6361534.846607885\, 6346855.715083607)" +ISL, Iceland, "BBOX(-2731602.192501422\, -1502751.454502109\, 1.0025136653899286E7\, 9196525.03584683)" +IRL, Ireland, "BBOX(-1166041.2756762397\, -669370.2206187705\, 7435966.643781227\, 6700487.126114637)" +XIM, Isle of Man, "BBOX(-532903.6568742928\, -479640.2861633771\, 7249411.799394163\, 7180682.877256964)" +SJM, Jan Mayen, "BBOX(-1015223.6258196725\, -882597.5845070281\, 1.1464383304063711E7\, 1.1335539300648466E7)" +XJE, Jersey, "BBOX(-250175.41607230977\, -224308.77391722222\, 6319282.822387621\, 6303377.056271344)" +GBR, United Kingdom, "BBOX(-909665.4752870986\, 194747.32654372943\, 8589937.148187652\, 6438533.511709376)" +CPV, Cape Verde, "BBOX(-2823124.068441826\, -2523179.7117936057\, 1943228.8819694468\, 1667440.6983404886)" +CIV, Cote d'Ivoire, "BBOX(-958058.0616790326\, -276938.62540612154\, 1202097.1729137793\, 484115.97315150854)" +GHA, Ghana, "BBOX(-361664.8917125052\, 133893.0797566771\, 1249767.3181259448\, 526814.3511759888)" +GIB, Gibraltar, "BBOX(-596246.4508776823\, -593834.8254294725\, 4323115.767768943\, 4316053.421468498)" +LBR, Liberia, "BBOX(-1279319.9894917065\, -820246.5358469777\, 951144.4190395237\, 483992.16413836647)" +MAR, Morocco, "BBOX(-1466630.283495554\, -112634.06264437255\, 4289504.155676036\, 3206707.2043454945)" +PRT, Portugal, "BBOX(-3483078.5525721395\, -689118.2982827483\, 5183576.317394064\, 3847286.4078652565)" +ESP, Spain, "BBOX(-2022660.0079814764\, 480560.1191156738\, 5429039.221465501\, 3203347.2301618545)" +ESH, Western Sahara, "BBOX(-1903733.2771624175\, -964738.2330011163\, 3207048.827624554\, 2363772.158427126)" +BFA, Burkina Faso, "BBOX(-614576.7635071143\, 266936.0125622843\, 1698741.2811715933\, 1050643.0120585556)" +GIN, Guinea, "BBOX(-1678791.0953426699\, -851969.5850923934\, 1422911.1290510038\, 802936.7522689679)" +GNB, Guinea-Bissau, "BBOX(-1861013.9772984823\, -1518830.9983475052\, 1423734.7230846898\, 1223613.9918118552)" +MLI, Mali, "BBOX(-1363089.019496892\, 473262.6812172274\, 2875778.1558879707\, 1134962.1365298633)" +MRT, Mauritania, "BBOX(-1900842.0873479373\, -535013.6065024948\, 3159807.24053085\, 1657600.8186799039)" +SEN, Senegal, "BBOX(-1951740.3641577882\, -1265694.4838205066\, 1884840.6777415504\, 1380068.3247828495)" +SLE, Sierra Leone, "BBOX(-1480060.423460439\, -1142617.6510657615\, 1118607.3838558097\, 772615.2434245716)" +GMB, The Gambia, "BBOX(-1872579.0705148762\, -1536054.1273216614\, 1554306.33090056\, 1466584.8753009895)" +DJI, Djibouti, "BBOX(4648685.682234346\, 4833537.819242839\, 1426428.7393574219\, 1225554.7892715929)" +ERI, 
Eritrea, "BBOX(4056847.594510955\, 4800250.285874032\, 2036949.5002702742\, 1387149.8027029647)" +ETH, Ethiopia, "BBOX(3672630.3758422886\, 5342026.99671924\, 1675790.1336981696\, 379451.74027328007)" +MNG, Mongolia, "BBOX(9769520.962097632\, 1.3350714510090472E7\, 6825981.925445475\, 5099261.916823782)" +SDN, Sudan, "BBOX(2430004.2961371886\, 4297767.240203056\, 2539428.7064047027\, 389123.6754710965)" +UGA, Uganda, "BBOX(3292196.0161092333\, 3897263.9800336002\, 470504.09041435266\, -164337.88255462408)" +ISR, Gaza Strip, "BBOX(3808981.5012748297\, 3847078.1479647276\, 3710408.4677697835\, 3660903.6805555364)" +IRQ, Iraq, "BBOX(4318606.2488766555\, 5405751.393937016\, 4492721.642260634\, 3383496.8234396563)" +ISR, Israel, "BBOX(3814649.7784257433\, 3972002.8842663835\, 3931233.3769460395\, 3437740.2376509146)" +JOR, Jordan, "BBOX(3891775.929138256\, 4374979.440881939\, 3945530.7721081185\, 3399709.663800458)" +KAZ, Kazakhstan, "BBOX(5176263.146752886\, 9723558.146230904\, 7448249.257062752\, 4952703.862043582)" +NOR, Norway, "BBOX(533173.8292784104\, 3459090.2041849457\, 1.1455379410923388E7\, 7964779.911100031)" +RUS, Russia, "BBOX(-2.003750834E7\, 2.003750834E7\, 1.6850434409817755E7\, 5041380.846897432)" +SWE, Sweden, "BBOX(1237130.9043623458\, 2690259.1355019724\, 1.0769543191624273E7\, 7427971.135671626)" +ISR, West Bank, "BBOX(3883735.6562778493\, 3959702.080535439\, 3835248.5789866336\, 3678377.284759022)" +DZA, Algeria, "BBOX(-964830.2942199894\, 1334328.0705815821\, 4451638.686907341\, 2152156.534692522)" +AND, Andorra, "BBOX(158228.52231611632\, 198339.94046960064\, 5259751.808527718\, 5226573.156424563)" +CMR, Cameroon, "BBOX(946478.719567819\, 1804154.9870354445\, 1469444.988943757\, 184166.28005485257)" +CAF, Central African Republic, "BBOX(1605103.603700283\, 3056801.8246613783\, 1232201.6067875316\, 247331.9412217624)" +LBY, Libya, "BBOX(1036539.304552783\, 2799870.317374274\, 3918041.4975678376\, 2213781.647695001)" +MCO, Monaco, "BBOX(822751.2243894777\, 828138.0858677052\, 5429655.8071539095\, 5423375.498489419)" +TUN, Tunisia, "BBOX(834029.8925561006\, 1289264.82751983\, 4486662.225217784\, 3533714.341264127)" +BEN, Benin, "BBOX(86457.72966594121\, 429136.6369483333\, 1390883.792858654\, 693627.7186615759)" +TCD, Chad, "BBOX(1498576.8622784517\, 2671973.3506688518\, 2686597.2252112613\, 832635.3730826946)" +GNQ, Equatorial Guinea, "BBOX(937773.5353889967\, 1263909.364466394\, 419234.1992921709\, 103548.81812163288)" +KIR, Kiribati, "BBOX(-1.7541914599896543E7\, 1.9252428633165136E7\, 226366.04306531145\, 148735.3163895852)" +NER, Niger, "BBOX(18552.840291496777\, 1780740.379303719\, 2695306.478633392\, 1310820.5810745189)" +NGA, Nigeria, "BBOX(299727.7289191666\, 1630792.0233506353\, 1561771.5570046515\, 476092.4293577717)" +STP, Sao Tome & Principe, "BBOX(719695.6473290791\, 830830.0137936934\, 189409.56079307984\, 2041.1542177410504)" +TGO, Togo, "BBOX(-16671.65221684311\, 200130.18052028888\, 1247820.9113916112\, 680396.3710024672)" +ALB, Albania, "BBOX(2147190.0053688344\, 2343645.64081804\, 5260414.963633992\, 4814487.957249004)" +BIH, Bosnia & Herzegovina, "BBOX(1752234.5746612719\, 2184064.14141101\, 5663486.702317411\, 5246118.059706764)" +HRV, Croatia, "BBOX(1503346.4571803163\, 2162381.1083583334\, 5866635.618622956\, 5221085.75286942)" +ITA, Italy, "BBOX(737376.1880908412\, 2061018.5894331736\, 5957525.94908941\, 4390316.944679211)" +MKD, Macedonia, "BBOX(2277465.201675234\, 2563796.186476749\, 5214901.594868669\, 4991108.7995952675)" +MLT, Malta, 
"BBOX(1595108.1153038554\, 1621924.980632222\, 4299511.834205549\, 4273136.461790226)" +SMR, San Marino, "BBOX(1381134.799507896\, 1392730.2829452723\, 5463410.973754562\, 5449776.352704761)" +SMN, Serbia & Montenegro, "BBOX(2054214.9647958176\, 2560904.8853427777\, 5809419.7157107135\, 5138387.144313233)" +VTC, Vatican City, "BBOX(1385312.3973578045\, 1386786.8240131561\, 5147266.721875869\, 5146144.937762506)" +BGR, Bulgaria, "BBOX(2489690.801465982\, 3184309.173149079\, 5500283.923251328\, 5048257.112102198)" +CYP, Cyprus, "BBOX(3592264.716628652\, 3850099.91554189\, 4257858.611081361\, 4115102.5028513763)" +EGY, Egypt, "BBOX(2750348.3947484\, 4107224.6734649837\, 3717055.3733837567\, 2510824.567439936)" +GEO, Georgia, "BBOX(4453109.470762285\, 5199824.4735734565\, 5401399.644378745\, 5019430.87461186)" +GRC, Greece, "BBOX(2186314.7988755554\, 3143444.7899599737\, 5123271.623236523\, 4154446.48763015)" +LBN, Lebanon, "BBOX(3907406.1875188733\, 4076936.6437751846\, 4116080.386414876\, 3903547.2121638493)" +SYR, Syria, "BBOX(3964583.8854840077\, 4717533.78165415\, 4479682.761680629\, 3804547.447187875)" +TUR, Turkey, "BBOX(2857106.79203054\, 4989400.245782474\, 5177469.827842194\, 4275668.354346954)" +AUT, Austria, "BBOX(1061272.4916527711\, 1910952.9027368103\, 6278042.62617315\, 5845892.142474166)" +CZE, Czech Republic, "BBOX(1346264.5256192111\, 2098619.3077916563\, 6630584.029505155\, 6204126.892396778)" +DNK, Denmark, "BBOX(900899.5106663116\, 1686397.1108695522\, 7914142.641677729\, 7277306.821832056)" +HUN, Hungary, "BBOX(1793557.3715133998\, 2548637.4774590665\, 6203250.422795402\, 5740109.762720737)" +POL, Poland, "BBOX(1574907.6352293568\, 2687639.1199670266\, 7330108.850656106\, 6275356.531185668)" +SVK, Slovakia, "BBOX(1875145.4300552672\, 2511151.0842176126\, 6377430.961535088\, 6063294.76382884)" +SVN, Slovenia, "BBOX(1489841.399198138\, 1848779.9652620046\, 5921897.448055978\, 5688808.783113411)" +SJM, Svalbard, "BBOX(1167509.6910790894\, 3744509.3710375\, 1.6048121551074298E7\, 1.2655555793739378E7)" +BEL, Belgium, "BBOX(282936.63088871894\, 712244.3658943777\, 6710441.719074484\, 6361653.309031685)" +FRA, France, "BBOX(-533251.7529219548\, 1064461.2384661005\, 6637425.700005567\, 5066318.240535327)" +DEU, Germany, "BBOX(652888.8134116667\, 1673556.9642057894\, 7372844.587967681\, 5987030.890923241)" +LIE, Liechtenstein, "BBOX(1054711.6548248013\, 1072439.8403286163\, 5987002.506696636\, 5951457.074129165)" +LUX, Luxembourg, "BBOX(638355.4972931738\, 726251.3634604733\, 6477821.694262034\, 6351301.791746342)" +NLD, Netherlands, "BBOX(375242.7526416523\, 802721.8423723045\, 7069632.465484033\, 6577873.226207013)" +CHE, Switzerland, "BBOX(664244.403346417\, 1167542.0850509058\, 6074750.670815664\, 5753058.221661312)" +USA, United States, "BBOX(-1.9838976150769826E7\, 2.001256564961837E7\, 1.1523520412740182E7\, 2146164.589200235)" +BLR, Belarus, "BBOX(2578760.5316635333\, 3644606.1393169463\, 7591830.885400406\, 6665963.6751351105)" +EST, Estonia, "BBOX(2430923.1272140685\, 3138551.853062327\, 8325466.382266233\, 7867699.765386352)" +FIN, Finland, "BBOX(2171998.1104861163\, 3515688.0389226186\, 1.1097617254588177E7\, 8356849.0793245975)" +LVA, Latvia, "BBOX(2334214.876198328\, 3143213.0227801744\, 7984826.971795753\, 7493955.154644284)" +LTU, Lithuania, "BBOX(2331345.838962512\, 2984815.5174770574\, 7648495.086573079\, 7149414.5404388225)" +MDA, Moldova, "BBOX(2964993.634990694\, 3353912.54367185\, 6185122.9269956285\, 5692430.167578349)" +ROM, Romania, 
"BBOX(2255447.2082748064\, 3303096.1980072116\, 6150868.213605207\, 5407332.237900151)" +UKR, Ukraine, "BBOX(2465887.5773919513\, 4472677.433490184\, 6868872.82154549\, 5524305.8506691335)" +IND, India, "BBOX(7585780.649085295\, 1.0840351679187058E7\, 4232806.675603967\, 752682.9865532124)" +MDV, Maldives, "BBOX(8111115.582462115\, 8197263.621304713\, 784297.2010665077\, -71431.20290758506)" +OMN, Oman, "BBOX(5788534.594925483\, 6662146.69277591\, 3044819.2631402686\, 1879282.0779841878)" +SOM, Somalia, "BBOX(4562831.081569439\, 5723081.7399744\, 1343337.2289440092\, -186472.5685638059)" +LKA, Sri Lanka, "BBOX(8871728.267099438\, 9116138.224105384\, 1099474.3430723047\, 659969.3086218301)" +TKM, Turkmenistan, "BBOX(5705144.162508433\, 7421768.6339453105\, 5280998.579824433\, 4183738.4781891424)" +UZB, Uzbekistan, "BBOX(6233612.182953193\, 8144973.85086014\, 5711801.139928842\, 4464923.610179015)" +YEM, Yemen, "BBOX(4737309.24391286\, 6063959.275257026\, 2154858.799301538\, 1362182.6880406907)" +ARM, Armenia, "BBOX(4837295.297334552\, 5189774.327307057\, 5056256.290729958\, 4698942.432854185)" +AZE, Azerbaijan, "BBOX(4984760.226767874\, 5752658.326798638\, 5268048.77475221\, 4616618.723595905)" +BHR, Bahrain, "BBOX(5616438.669684706\, 5654628.379468894\, 3034905.550106453\, 2946160.3652355284)" +IRN, Iran, "BBOX(4901948.6557028685\, 7049893.741177648\, 4833901.247983729\, 2885079.0840316075)" +KWT, Kuwait, "BBOX(5181582.214661229\, 5389710.255315938\, 3514372.934498193\, 3317085.938189461)" +QAT, Qatar, "BBOX(5649679.671506368\, 5745847.577713873\, 3017981.013632691\, 2821312.488451719)" +SAU, Saudi Arabia, "BBOX(3848553.5764954956\, 6196722.907460272\, 3783666.794569951\, 1848481.0463722278)" +ARE, United Arab Emirates, "BBOX(5742229.694263595\, 6276378.014364274\, 3009473.8025495554\, 2587735.5585281393)" +AFG, Afghanistan, "BBOX(6735292.615095663\, 8339581.582762433\, 4646317.28372925\, 3427436.851842879)" +KGZ, Kyrgyzstan, "BBOX(7708819.076615721\, 8936904.82707441\, 5345044.727405903\, 4749710.205362992)" +NPL, Nepal, "BBOX(8911370.139640821\, 9817772.840653224\, 3558261.041954822\, 3044776.39805181)" +PAK, Pakistan, "BBOX(6775605.521527455\, 8663319.92396695\, 4447583.65883328\, 2715440.846640232)" +TJK, Tajikistan, "BBOX(7499004.100397766\, 8369832.209103333\, 5019609.3336218465\, 4393464.385496015)" +BGD, Bangladesh, "BBOX(9800998.997143846\, 1.0315904296110207E7\, 3076839.5287209633\, 2361476.7409209567)" +BTN, Bhutan, "BBOX(9879820.321061922\, 1.025410784115321E7\, 3290010.9896438504\, 3086490.161301852)" +BRN, Brunei, "BBOX(1.2701006428488798E7\, 1.2841845733150518E7\, 563234.0022074429\, 447670.0898939893)" +CHN, China, "BBOX(8195345.9204370845\, 1.5002356674063785E7\, 7086089.890077106\, 2057325.3856844143)" +JPN, Japan, "BBOX(1.3767868049134541E7\, 1.623176311896106E7\, 5698420.16133248\, 2784071.2548644035)" +PRK, North Korea, "BBOX(1.3839679250759868E7\, 1.4549170017730366E7\, 5312900.3745006835\, 4533106.558340659)" +PLW, Palau, "BBOX(1.4967181830048332E7\, 1.4990157059749957E7\, 863059.693444481\, 815429.4880146481)" +PHL, Philippines, "BBOX(1.3018814446461111E7\, 1.4092828900986778E7\, 2201037.2202695687\, 562799.2811739098)" +KOR, South Korea, "BBOX(1.4037278471337833E7\, 1.4425544602525068E7\, 4668132.414354527\, 3920844.3714562915)" +KHM, Cambodia, "BBOX(1.139316126476325E7\, 1.1982027233402364E7\, 1655642.1223870981\, 1166706.2324655629)" +LAO, Laos, "BBOX(1.1142120562289124E7\, 1.1988580834463434E7\, 2571654.2509495416\, 1565804.2404149454)" +MYS, Malaysia, 
"BBOX(1.1092089575631922E7\, 1.32777233218629E7\, 820779.1279511156\, 94934.7631846226)" +MMR, Myanmar, "BBOX(1.0264212645289583E7\, 1.126212909591956E7\, 3318054.720285839\, 1100761.292465509)" +SGP, Singapore, "BBOX(1.1537257221127674E7\, 1.157699827933534E7\, 160905.1210847127\, 140165.52511697204)" +THA, Thailand, "BBOX(1.0836648747645825E7\, 1.1759712080245482E7\, 2326960.8760532974\, 628128.2178646458)" +VNM, Vietnam, "BBOX(1.137025572106285E7\, 1.2185570803468373E7\, 2671268.1479721097\, 956373.5794062541)" +GUM, Guam, "BBOX(1.610060037235469E7\, 1.613612854443387E7\, 1534354.7088998647\, 1486593.2644101644)" +MHL, Marshall Is., "BBOX(1.8069932221681617E7\, 1.9077718703641918E7\, 1642457.1731015244\, 624414.5801310536)" +FSM, Micronesia, "BBOX(1.76018490137313E7\, 1.8149851601056725E7\, 778674.0289479959\, 586550.7704269526)" +MNP, Northern Mariana Is., "BBOX(1.6205076831395375E7\, 1.6232394634432243E7\, 1720127.7032804906\, 1678605.9653024632)" +UMI, Wake I., "BBOX(1.854682692392445E7\, 1.8552751235904157E7\, 2193187.709933591\, 2187863.8226788775)" +BWA, Botswana, "BBOX(2225956.6714169392\, 3269856.198060967\, -2012057.3125287183\, -3107932.575048184)" +BDI, Burundi, "BBOX(3226595.4401938887\, 3434561.510989516\, -256277.86419111618\, -495653.34463959694)" +ATF, French Southern & Antarctic Lands, "BBOX(5749744.761766512\, 7855537.163585416\, -5833010.924598094\, -6398787.743617378)" +HMD, Heard I. & McDonald Is., "BBOX(8152450.513138738\, 8212470.976939865\, -6976553.288377103\, -7019975.393962887)" +KEN, Kenya, "BBOX(3774534.2414511004\, 4664861.406119774\, 515133.4762737857\, -520395.9201280237)" +RWA, Rwanda, "BBOX(3212062.1240753955\, 3439022.3056239635\, -117387.0182772328\, -314659.7811132031)" +TZA, Tanzania, "BBOX(3266205.9206388732\, 4501404.98655826\, -111015.40498408281\, -1316180.4208213643)" +ZMB, Zambia, "BBOX(2448627.045068894\, 3751720.8702890654\, -915014.476700008\, -2046319.4302683398)" +ZWE, Zimbabwe, "BBOX(2809472.180051312\, 3681512.6693309383\, -1760356.671722378\, -2561396.0054164226)" +ATA, Antarctica, "BBOX(-2.003750834E7\, 2.003750834E7\, -8512662.881033322\, -4.748140766343476E9)" +NOR, Bouvet I., "BBOX(372070.1471544857\, 387855.25094677455\, -7243144.612387524\, -7258293.454237509)" +COM, Comoros, "BBOX(4810563.480097139\, 4957103.455881589\, -1273745.795821429\, -1389333.8616461232)" +REU, Juan De Nova I., "BBOX(4755993.663842456\, 4760121.613199477\, -1926881.0822095312\, -1929687.4249448022)" +LSO, Lesotho, "BBOX(3007181.718244638\, 3278977.271857335\, -3321117.2692412077\, -3587446.106149188)" +MWI, Malawi, "BBOX(3638129.460024005\, 3998703.3094073967\, -1048497.2089351554\, -1936578.3607502843)" +MOZ, Mozambique, "BBOX(3363297.7786198338\, 4546968.054133605\, -1172181.8581435068\, -3106026.6491282047)" +ZAF, South Africa, "BBOX(1834915.5679635953\, 4218142.412200545\, -2527908.4975596936\, -5937134.146607068)" +SWZ, Swaziland, "BBOX(3428455.080322901\, 3577073.7249586442\, -2965472.9128583763\, -3163056.5390926218)" +AGO, Angola, "BBOX(1305916.2195893514\, 2681068.153065396\, -489058.770192694\, -2039467.1713562359)" +COG, Congo, "BBOX(1240172.93208683\, 2075397.0601399948\, 413407.92638141196\, -558981.4471095677)" +ZAR, Congo\, DRC, "BBOX(1359717.9313576685\, 3484608.750292371\, 599858.1461695591\, -1512112.8916449302)" +FJI, Fiji, "BBOX(-2.003750834E7\, 2.003750834E7\, -1822502.649701532\, -2174110.2636207)" +GAB, Gabon, "BBOX(968572.632860957\, 1616312.474546188\, 258097.85802697268\, -437302.607003333)" +NAM, Namibia, 
"BBOX(1304262.6798733384\, 2812423.14843234\, -1915491.159689654\, -3370794.2160844747)" +NZL, New Zealand, "BBOX(-1.9686713351283982E7\, 1.9908496063316472E7\, -4084625.39078185\, -6905327.726548656)" +IOT, British Indian Ocean Territory, "BBOX(8054844.581749367\, 8070026.5565406205\, -807374.1159864698\, -830132.9519243974)" +REU, Glorioso Is., "BBOX(5263084.334556216\, 5265751.883513724\, -1295003.534066991\, -1297694.4422191991)" +MDG, Madagascar, "BBOX(4813101.564486872\, 5621789.129689449\, -1339512.841638736\, -2948183.285092941)" +MUS, Mauritius, "BBOX(6379309.136233983\, 7068315.001831045\, -2234372.9783939887\, -2334800.8501905375)" +MYT, Mayotte, "BBOX(5013736.69021733\, 5042032.101022207\, -1421199.6616333937\, -1458875.4272419864)" +REU, Reunion, "BBOX(6147123.9517467795\, 6217533.529663724\, -2374779.1643490326\, -2436517.3438334884)" +SYC, Seychelles, "BBOX(5143593.993155349\, 6182746.188795668\, -507222.7096158059\, -1058244.6497234497)" +CXR, Christmas I., "BBOX(1.175856649136589E7\, 1.1772247656782478E7\, -1162331.3692172004\, -1176694.9418773586)" +CCK, Cocos Is., "BBOX(1.0777673796502084E7\, 1.0782945219668373E7\, -1360554.4203425802\, -1368415.0936628287)" +IDN, Indonesia, "BBOX(1.0598833913871005E7\, 1.5696829439852942E7\, 659456.6237303711\, -1224130.4157647756)" +TLS, Timor Leste, "BBOX(1.3808748684969299E7\, 1.4171927521756383E7\, -909204.3581778448\, -1058309.2029773812)" +AUS, Australia, "BBOX(1.2568773011020126E7\, 1.7695387664886124E7\, -1134231.265244234\, -7314247.137263005)" +NRU, Nauru, "BBOX(1.8579714820321366E7\, 1.8585573231162526E7\, -54918.590898148344\, -61473.57829423625)" +NCL, New Caledonia, "BBOX(1.8254475669742182E7\, 1.871620264608858E7\, -2283448.9342597914\, -2592628.621050228)" +NFK, Norfolk I., "BBOX(1.869176089341545E7\, 1.870154888228107E7\, -3375716.673702962\, -3385973.448600687)" +PNG, Papua New Guinea, "BBOX(1.5680335898821346E7\, 1.7362149763616595E7\, -150883.37308403326\, -1305049.300451269)" +SLB, Solomon Is., "BBOX(1.7329249844714675E7\, 1.858276697811951E7\, -736957.2370687899\, -1328168.5471204517)" +TUV, Tuvalu, "BBOX(1.962509790181899E7\, 1.9952046251859576E7\, -679153.8120624761\, -956604.9181074377)" +VUT, Vanuatu, "BBOX(1.8537103723002467E7\, 1.8912498315429542E7\, -1540647.6688226506\, -2303165.641357482)" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec index e4f1726b3e1ff..7a7d2486d97e1 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec @@ -319,7 +319,7 @@ Parto |Bamford |6.004230000000001 // end::evalReplace-result[] ; -docsEvalUnnamedColumn +docsEvalUnnamedColumn#[skip:-8.12.99,reason:expression spaces are maintained since 8.13] // tag::evalUnnamedColumn[] FROM employees | SORT emp_no @@ -329,7 +329,7 @@ FROM employees | LIMIT 3; // tag::evalUnnamedColumn-result[] -first_name:keyword | last_name:keyword | height:double | height*3.281:double +first_name:keyword | last_name:keyword | height:double | height * 3.281:double Georgi |Facello |2.03 |6.66043 Bezalel |Simmel |2.08 |6.82448 Parto |Bamford |1.83 |6.004230000000001 @@ -348,4 +348,4 @@ FROM employees avg_height_feet:double 5.801464200000001 // end::evalUnnamedColumnStats-result[] -; \ No newline at end of file +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox.json 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox.json new file mode 100644 index 0000000000000..eb386b84ff70b --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox.json @@ -0,0 +1,13 @@ +{ + "properties": { + "id": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "shape": { + "type": "geo_shape" + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox_web.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox_web.json new file mode 100644 index 0000000000000..303c828c84285 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox_web.json @@ -0,0 +1,13 @@ +{ + "properties": { + "id": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "shape": { + "type": "shape" + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/null.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/null.csv-spec index 0d7fed9028fe4..92537ed1221e8 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/null.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/null.csv-spec @@ -56,7 +56,7 @@ COUNT(emp_no):long // end::is-not-null-result[] ; -coalesceSimple +coalesceSimple#[skip:-8.12.99,reason:expression spaces are maintained since 8.13] // tag::coalesce[] ROW a=null, b="b" | EVAL COALESCE(a, b) @@ -64,7 +64,7 @@ ROW a=null, b="b" ; // tag::coalesce-result[] -a:null | b:keyword | COALESCE(a,b):keyword +a:null | b:keyword | COALESCE(a, b):keyword null | b | b // end::coalesce-result[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index f3cb362c40e22..754d4a0e156cf 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -46,10 +46,10 @@ median_absolute_deviation|"double|integer|long|unsigned_long median_absolute_dev min |"double|integer|long|unsigned_long min(field:double|integer|long|unsigned_long)" |field |"double|integer|long|unsigned_long" | "" |"double|integer|long|unsigned_long" | "The minimum value of a numeric field." | false | false | true mv_avg |"double mv_avg(field:double|integer|long|unsigned_long)" |field |"double|integer|long|unsigned_long" | "" |double | "Converts a multivalued field into a single valued field containing the average of all of the values." | false | false | false mv_concat |"keyword mv_concat(v:text|keyword, delim:text|keyword)" |[v, delim] |["text|keyword", "text|keyword"] |["values to join", "delimiter"] |keyword | "Reduce a multivalued string field to a single valued field by concatenating all values." | [false, false] | false | false -mv_count |"integer mv_count(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | "" | integer | "Reduce a multivalued field to a single valued field containing the count of values." 
| false | false | false +mv_count |"integer mv_count(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | "" | integer | "Reduce a multivalued field to a single valued field containing the count of values." | false | false | false mv_dedupe |"boolean|date|double|integer|ip|keyword|long|text|version mv_dedupe(v:boolean|date|double|integer|ip|keyword|long|text|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|version" | "" |"boolean|date|double|integer|ip|keyword|long|text|version" | "Remove duplicate values from a multivalued field." | false | false | false -mv_first |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version mv_first(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the first value." | false | false | false -mv_last |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version mv_last(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the last value." | false | false | false +mv_first |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version mv_first(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the first value." | false | false | false +mv_last |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version mv_last(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the last value." | false | false | false mv_max |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_max(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the maximum value." 
| false | false | false mv_median |"double|integer|long|unsigned_long mv_median(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | "" |"double|integer|long|unsigned_long" | "Converts a multivalued field into a single valued field containing the median value." | false | false | false mv_min |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the minimum value." | false | false | false @@ -75,20 +75,22 @@ tau |double tau() to_bool |"boolean to_bool(v:boolean|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|keyword|text|double|long|unsigned_long|integer" | |boolean | "Converts an input value to a boolean value." |false |false | false to_boolean |"boolean to_boolean(v:boolean|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|keyword|text|double|long|unsigned_long|integer" | |boolean | "Converts an input value to a boolean value." |false |false | false to_cartesianpoint |"cartesian_point to_cartesianpoint(v:cartesian_point|keyword|text)" |v |"cartesian_point|keyword|text" | |cartesian_point | "Converts an input value to a point value." |false |false | false +to_cartesianshape |"cartesian_shape to_cartesianshape(v:cartesian_shape|keyword|text)" |v |"cartesian_shape|keyword|text" | |cartesian_shape | "Converts an input value to a shape value." |false |false | false to_datetime |"date to_datetime(v:date|keyword|text|double|long|unsigned_long|integer)" |v |"date|keyword|text|double|long|unsigned_long|integer" | |date | "Converts an input value to a date value." |false |false | false to_dbl |"double to_dbl(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |double | "Converts an input value to a double value." |false |false | false to_degrees |"double to_degrees(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | |double | "Converts a number in radians to degrees." |false |false | false to_double |"double to_double(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |double | "Converts an input value to a double value." |false |false | false to_dt |"date to_dt(v:date|keyword|text|double|long|unsigned_long|integer)" |v |"date|keyword|text|double|long|unsigned_long|integer" | |date | "Converts an input value to a date value." |false |false | false to_geopoint |"geo_point to_geopoint(v:geo_point|keyword|text)" |v |"geo_point|keyword|text" | |geo_point | "Converts an input value to a geo_point value." |false |false | false +to_geoshape |"geo_shape to_geoshape(v:geo_shape|keyword|text)" |v |"geo_shape|keyword|text" | |geo_shape | "Converts an input value to a geo_shape value." |false |false | false to_int |"integer to_int(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |integer | "Converts an input value to an integer value." 
|false |false | false to_integer |"integer to_integer(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |integer | "Converts an input value to an integer value." |false |false | false to_ip |"ip to_ip(v:ip|keyword|text)" |v |"ip|keyword|text" | |ip | "Converts an input string to an IP value." |false |false | false to_long |"long to_long(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |long | "Converts an input value to a long value." |false |false | false to_lower |"keyword|text to_lower(str:keyword|text)" |str |"keyword|text" | "The input string" |"keyword|text" | "Returns a new string representing the input string converted to lower case." |false |false | false to_radians |"double to_radians(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | |double | "Converts a number in degrees to radians." |false |false | false -to_str |"keyword to_str(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" |v |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | |keyword | "Converts a field into a string." |false |false | false -to_string |"keyword to_string(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" |v |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | |keyword | "Converts a field into a string." |false |false | false +to_str |"keyword to_str(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" |v |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | |keyword | "Converts a field into a string." |false |false | false +to_string |"keyword to_string(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" |v |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | |keyword | "Converts a field into a string." |false |false | false to_ul |"unsigned_long to_ul(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |unsigned_long | "Converts an input value to an unsigned long value." |false |false | false to_ulong |"unsigned_long to_ulong(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |unsigned_long | "Converts an input value to an unsigned long value." |false |false | false to_unsigned_long |"unsigned_long to_unsigned_long(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |unsigned_long | "Converts an input value to an unsigned long value." 
|false |false | false @@ -139,10 +141,10 @@ double e() "double|integer|long|unsigned_long min(field:double|integer|long|unsigned_long)" "double mv_avg(field:double|integer|long|unsigned_long)" "keyword mv_concat(v:text|keyword, delim:text|keyword)" -"integer mv_count(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" +"integer mv_count(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" "boolean|date|double|integer|ip|keyword|long|text|version mv_dedupe(v:boolean|date|double|integer|ip|keyword|long|text|version)" -"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version mv_first(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" -"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version mv_last(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" +"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version mv_first(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" +"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version mv_last(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_max(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" "double|integer|long|unsigned_long mv_median(v:double|integer|long|unsigned_long)" "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" @@ -167,21 +169,23 @@ double pi() double tau() "boolean to_bool(v:boolean|keyword|text|double|long|unsigned_long|integer)" "boolean to_boolean(v:boolean|keyword|text|double|long|unsigned_long|integer)" -"cartesian_point to_cartesianpoint(v:cartesian_point|keyword|text)" +"cartesian_point to_cartesianpoint(v:cartesian_point|keyword|text)" +"cartesian_shape to_cartesianshape(v:cartesian_shape|keyword|text)" "date to_datetime(v:date|keyword|text|double|long|unsigned_long|integer)" "double to_dbl(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "double to_degrees(v:double|integer|long|unsigned_long)" "double to_double(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" -"date to_dt(v:date|keyword|text|double|long|unsigned_long|integer)" -"geo_point to_geopoint(v:geo_point|keyword|text)" +"date to_dt(v:date|keyword|text|double|long|unsigned_long|integer)" +"geo_point to_geopoint(v:geo_point|keyword|text)" +"geo_shape to_geoshape(v:geo_shape|keyword|text)" "integer to_int(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "integer to_integer(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "ip to_ip(v:ip|keyword|text)" "long to_long(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "keyword|text to_lower(str:keyword|text)" "double to_radians(v:double|integer|long|unsigned_long)" -"keyword to_str(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" -"keyword 
to_string(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" +"keyword to_str(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" +"keyword to_string(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" "unsigned_long to_ul(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "unsigned_long to_ulong(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "unsigned_long to_unsigned_long(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" @@ -212,5 +216,5 @@ countFunctions#[skip:-8.12.99] show functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -86 | 86 | 86 +88 | 88 | 88 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec new file mode 100644 index 0000000000000..71d7c0dbdcfdd --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec @@ -0,0 +1,135 @@ +############################################### +# Tests for GEO_SHAPE type +# + +convertFromString#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] +row wkt = "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))" +| eval pt = to_geoshape(wkt); + +wkt:keyword |pt:geo_shape +"POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))" | POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10)) +; + +convertFromStringArray#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] +row wkt = ["POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))", "POINT(75.8092915005895 22.727749187571)"] +| eval pt = to_geoshape(wkt); + +wkt:keyword |pt:geo_shape +["POLYGON ((30 10\, 40 40\, 20 40\, 10 20\, 30 10))", "POINT(75.8092915005895 22.727749187571)"] |[POLYGON ((30 10\, 40 40\, 20 40\, 10 20\, 30 10)), POINT(75.8092915005895 22.727749187571)] +; + +# need to work out how to upload WKT +simpleLoad#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] +FROM countries_bbox | WHERE id == "ISL"; + +id:keyword| name:keyword| shape:geo_shape +ISL|Iceland|BBOX(-24.538400, -13.499446, 66.536100, 63.390000) +; + +geo_shapeEquals#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] + +ROW wkt = ["POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))", "POINT(75.8092915005895 22.727749187571)"] +| MV_EXPAND wkt +| EVAL pt = to_geoshape(wkt) +| WHERE pt == to_geoshape("POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))") +; + +wkt:keyword |pt:geo_shape +"POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))" |POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10)) +; + +geo_shapeNotEquals#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] +ROW wkt = ["POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))", "POINT(75.8092915005895 22.727749187571)"] +| MV_EXPAND wkt +| EVAL pt = to_geoshape(wkt) +| WHERE pt != to_geoshape("POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))") +; + +wkt:keyword |pt:geo_shape +"POINT(75.8092915005895 22.727749187571)" |POINT(75.8092915005895 22.727749187571) +; + +convertFromStringParseError#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] +row wkt = ["POINTX(42.97109630194 14.7552534413725)", "POINT(75.8092915005895 22.727749187571)", "POINT(111)"] +| mv_expand wkt +| eval pt = to_geoshape(wkt) +; + +warning:Line 3:13: evaluation of [to_geoshape(wkt)] failed, treating result as null. 
Only first 20 failures recorded. +warning:Line 3:13: java.lang.IllegalArgumentException: Failed to parse WKT: Unknown geometry type: pointx +warning:Line 3:13: java.lang.IllegalArgumentException: Failed to parse WKT: expected number but found: ')' + +wkt:keyword |pt:geo_shape +"POINTX(42.97109630194 14.7552534413725)" |null +"POINT(75.8092915005895 22.727749187571)" |POINT(75.8092915005895 22.727749187571) +"POINT(111)" |null +; + +############################################### +# Tests for CARTESIAN_SHAPE type +# + +convertCartesianShapeFromString#[skip:-8.12.99, reason: spatial type cartesian_shape only added in 8.13] + +row wkt = "POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))" +| mv_expand wkt +| eval pt = to_cartesianshape(wkt) +; + +wkt:keyword |pt:cartesian_shape +"POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))" |POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97)) +; + +convertCartesianFromStringArray#[skip:-8.12.99, reason:spatial type cartesian_shape only added in 8.13] +row wkt = ["POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))", "POINT(7580.93 2272.77)"] +| eval pt = to_cartesianshape(wkt); + +wkt:keyword |pt:cartesian_shape +["POLYGON ((3339584.72 1118889.97\, 4452779.63 4865942.27\, 2226389.81 4865942.27\, 1113194.90 2273030.92\, 3339584.72 1118889.97))", "POINT(7580.93 2272.77)"] |[POLYGON ((3339584.72 1118889.97\, 4452779.63 4865942.27\, 2226389.81 4865942.27\, 1113194.90 2273030.92\, 3339584.72 1118889.97)), POINT(7580.93 2272.77)] +; + +# need to work out how to upload WKT +simpleCartesianShapeLoad#[skip:-8.12.99, reason: spatial type cartesian_shape only added in 8.13] +FROM countries_bbox_web | WHERE id == "ISL"; + +id:keyword| name:keyword|shape:cartesian_shape +ISL|Iceland|BBOX(-2731602.192501422, -1502751.454502109, 1.0025136653899286E7, 9196525.03584683) +; + +cartesianshapeEquals#[skip:-8.12.99, reason: spatial type cartesian_shape only added in 8.13] +ROW wkt = ["POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))", "POINT(7580.93 2272.77)"] +| MV_EXPAND wkt +| EVAL pt = to_cartesianshape(wkt) +| WHERE pt == to_cartesianshape("POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))") +; + +wkt:keyword |pt:cartesian_shape +"POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))" |POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97)) +; + +cartesianShapeNotEquals#[skip:-8.12.99, reason: spatial type cartesian_shape only added in 8.13] +ROW wkt = ["POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))", "POINT(7580.93 2272.77)"] +| MV_EXPAND wkt +| EVAL pt = to_cartesianshape(wkt) +| WHERE pt != to_cartesianshape("POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))") +; + +wkt:keyword |pt:cartesian_shape +"POINT(7580.93 2272.77)" |POINT(7580.93 2272.77) +; + +convertCartesianShapeFromStringParseError#[skip:-8.12.99, reason: spatial type 
cartesian_shape only added in 8.13] +row wkt = ["POINTX(4297.11 -1475.53)", "POINT(7580.93 2272.77)", "POINT(111)"] +| mv_expand wkt +| eval pt = to_cartesianshape(wkt) +; + +warning:Line 3:13: evaluation of [to_cartesianshape(wkt)] failed, treating result as null. Only first 20 failures recorded. +warning:Line 3:13: java.lang.IllegalArgumentException: Failed to parse WKT: Unknown geometry type: pointx +warning:Line 3:13: java.lang.IllegalArgumentException: Failed to parse WKT: expected number but found: ')' + +wkt:keyword |pt:cartesian_shape +"POINTX(4297.11 -1475.53)" |null +"POINT(7580.93 2272.77)" |POINT(7580.93 2272.77) +"POINT(111)" |null +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index ded080023f5c4..d58ce27f878c0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -879,3 +879,60 @@ AVG(salary):double | avg_salary_rounded:double 48248.55 | 48249.0 // end::statsUnnamedColumnEval-result[] ; + +nestedExpressionNoGrouping#[skip:-8.12.99,reason:StatsNestedExp breaks bwc] +FROM employees +| STATS s = SUM(emp_no + 3), c = COUNT(emp_no) +; + +s: long | c: long +1005350 | 100 +; + +nestedExpressionInSurrogateAgg#[skip:-8.12.99,reason:StatsNestedExp breaks bwc] +FROM employees +| STATS a = AVG(emp_no % 5), s = SUM(emp_no % 5), c = COUNT(emp_no % 5) +; + +a:double | s:long | c:long +2.0 | 200 | 100 +; + +nestedExpressionInGroupingWithAlias#[skip:-8.12.99,reason:StatsNestedExp breaks bwc] +FROM employees +| STATS s = SUM(emp_no % 5), c = COUNT(emp_no % 5) BY l = languages + 20 +| SORT l +; + +s:long | c:long | l : i +39 | 15 | 21 +36 | 19 | 22 +30 | 17 | 23 +32 | 18 | 24 +43 | 21 | 25 +20 | 10 | null +; + +nestedMultiExpressionInGroupingsAndAggs#[skip:-8.12.99,reason:StatsNestedExp breaks bwc] +FROM employees +| EVAL sal = salary + 10000 +| STATS sum(sal), sum(salary + 10000) BY left(first_name, 1), concat(gender, to_string(languages)) +| SORT `left(first_name, 1)`, `concat(gender, to_string(languages))` +| LIMIT 5 +; + +sum(sal):l | sum(salary + 10000):l | left(first_name, 1):s | concat(gender, to_string(languages)):s +54307 | 54307 | A | F2 +70335 | 70335 | A | F3 +76817 | 76817 | A | F5 +123675 | 123675 | A | M3 +43370 | 43370 | B | F2 +; + + + + + + + + diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec index 68f67b8a2743b..e0e8ca351cfe5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec @@ -87,7 +87,7 @@ COUNT_DISTINCT(ip0):long | COUNT_DISTINCT(ip1):long // end::count-distinct-result[] ; -countDistinctOfIpPrecision +countDistinctOfIpPrecision#[skip:-8.12.99,reason:expression spaces are maintained since 8.13] // tag::count-distinct-precision[] FROM hosts | STATS COUNT_DISTINCT(ip0, 80000), COUNT_DISTINCT(ip1, 5) @@ -95,7 +95,7 @@ FROM hosts ; // tag::count-distinct-precision-result[] -COUNT_DISTINCT(ip0,80000):long | COUNT_DISTINCT(ip1,5):long +COUNT_DISTINCT(ip0, 80000):long | COUNT_DISTINCT(ip1, 5):long 7 | 9 // end::count-distinct-precision-result[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec index f1849107d606d..091a625c7e10d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec @@ -77,7 +77,7 @@ m:double | p50:double 0 | 0 ; -medianOfInteger#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] +medianOfInteger#[skip:-8.12.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765/Expression spaces are maintained since 8.13] // tag::median[] FROM employees | STATS MEDIAN(salary), PERCENTILE(salary, 50) @@ -85,7 +85,7 @@ FROM employees ; // tag::median-result[] -MEDIAN(salary):double | PERCENTILE(salary,50):double +MEDIAN(salary):double | PERCENTILE(salary, 50):double 47003 | 47003 // end::median-result[] ; diff --git a/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.g4 b/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.g4 index fd269a779cfd5..e0e1514c903a9 100644 --- a/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.g4 +++ b/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.g4 @@ -111,15 +111,11 @@ evalCommand ; statsCommand - : STATS fields? (BY grouping)? + : STATS stats=fields? (BY grouping=fields)? ; inlinestatsCommand - : INLINESTATS fields (BY grouping)? - ; - -grouping - : qualifiedName (COMMA qualifiedName)* + : INLINESTATS stats=fields (BY grouping=fields)? ; fromIdentifier diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeFromStringEvaluator.java new file mode 100644 index 0000000000000..5ec9dcb94f67f --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeFromStringEvaluator.java @@ -0,0 +1,125 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToCartesianShape}. + * This class is generated. Do not edit it. 
+ */ +public final class ToCartesianShapeFromStringEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToCartesianShapeFromStringEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToCartesianShapeFromString"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendBytesRef(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToCartesianShape.fromKeyword(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToCartesianShape.fromKeyword(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToCartesianShapeFromStringEvaluator get(DriverContext context) { + return new ToCartesianShapeFromStringEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToCartesianShapeFromStringEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeFromStringEvaluator.java 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeFromStringEvaluator.java new file mode 100644 index 0000000000000..68a6087d86953 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeFromStringEvaluator.java @@ -0,0 +1,125 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToGeoShape}. + * This class is generated. Do not edit it. + */ +public final class ToGeoShapeFromStringEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToGeoShapeFromStringEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToGeoShapeFromString"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendBytesRef(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToGeoShape.fromKeyword(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } catch 
(IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToGeoShape.fromKeyword(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToGeoShapeFromStringEvaluator get(DriverContext context) { + return new ToGeoShapeFromStringEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToGeoShapeFromStringEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromCartesianShapeEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromCartesianShapeEvaluator.java new file mode 100644 index 0000000000000..5e466ddfbfddc --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromCartesianShapeEvaluator.java @@ -0,0 +1,110 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToString}. + * This class is generated. Do not edit it. 
+ */ +public final class ToStringFromCartesianShapeEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToStringFromCartesianShapeEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToStringFromCartesianShape"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + builder.appendBytesRef(evalValue(vector, p, scratchPad)); + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToString.fromCartesianShape(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToString.fromCartesianShape(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToStringFromCartesianShapeEvaluator get(DriverContext context) { + return new ToStringFromCartesianShapeEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToStringFromCartesianShapeEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromGeoShapeEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromGeoShapeEvaluator.java new file mode 100644 index 0000000000000..df8e86e58fa69 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromGeoShapeEvaluator.java @@ -0,0 +1,110 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToString}. + * This class is generated. Do not edit it. + */ +public final class ToStringFromGeoShapeEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToStringFromGeoShapeEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToStringFromGeoShape"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + builder.appendBytesRef(evalValue(vector, p, scratchPad)); + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToString.fromGeoShape(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToString.fromGeoShape(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToStringFromGeoShapeEvaluator get(DriverContext context) { + return new 
ToStringFromGeoShapeEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToStringFromGeoShapeEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java index 43a16872fd99a..79ce1754f7163 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java @@ -162,14 +162,14 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa return builder.value(UTC_DATE_TIME_FORMATTER.formatMillis(longVal)); } }; - case "geo_point" -> new PositionToXContent(block) { + case "geo_point", "geo_shape" -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { return builder.value(GEO.wkbToWkt(((BytesRefBlock) block).getBytesRef(valueIndex, scratch))); } }; - case "cartesian_point" -> new PositionToXContent(block) { + case "cartesian_point", "cartesian_shape" -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java index 63686820574b5..1763e36707958 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java @@ -39,6 +39,8 @@ public class EsqlQueryResponse extends ActionResponse implements ChunkedToXConte @SuppressWarnings("this-escape") private final AbstractRefCounted counted = AbstractRefCounted.of(this::closeInternal); + public static final String DROP_NULL_COLUMNS_OPTION = "drop_null_columns"; + private final List columns; private final List pages; private final Profile profile; @@ -160,20 +162,45 @@ private Iterator asyncPropertiesOrEmpty() { @Override public Iterator toXContentChunked(ToXContent.Params params) { - final Iterator valuesIt = ResponseXContentUtils.columnValues(this.columns, this.pages, columnar); + boolean dropNullColumns = params.paramAsBoolean(DROP_NULL_COLUMNS_OPTION, false); + boolean[] nullColumns = dropNullColumns ? nullColumns() : null; + Iterator columnHeadings = dropNullColumns + ? Iterators.concat( + ResponseXContentUtils.allColumns(columns, "all_columns"), + ResponseXContentUtils.nonNullColumns(columns, nullColumns, "columns") + ) + : ResponseXContentUtils.allColumns(columns, "columns"); + Iterator valuesIt = ResponseXContentUtils.columnValues(this.columns, this.pages, columnar, nullColumns); Iterator profileRender = profile == null ? 
List.of().iterator() : ChunkedToXContentHelper.field("profile", profile, params); return Iterators.concat( ChunkedToXContentHelper.startObject(), asyncPropertiesOrEmpty(), - ResponseXContentUtils.columnHeadings(columns), + columnHeadings, ChunkedToXContentHelper.array("values", valuesIt), profileRender, ChunkedToXContentHelper.endObject() ); } + private boolean[] nullColumns() { + boolean[] nullColumns = new boolean[columns.size()]; + for (int c = 0; c < nullColumns.length; c++) { + nullColumns[c] = allColumnsAreNull(c); + } + return nullColumns; + } + + private boolean allColumnsAreNull(int c) { + for (Page page : pages) { + if (page.getBlock(c).areAllValuesNull() == false) { + return false; + } + } + return true; + } + @Override public boolean isFragment() { return false; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java index 40bc90d8c5b0c..d5dc12357f3fe 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java @@ -101,8 +101,8 @@ private static Object valueAt(String dataType, Block block, int offset, BytesRef } case "boolean" -> ((BooleanBlock) block).getBoolean(offset); case "version" -> new Version(((BytesRefBlock) block).getBytesRef(offset, scratch)).toString(); - case "geo_point" -> GEO.wkbToWkt(((BytesRefBlock) block).getBytesRef(offset, scratch)); - case "cartesian_point" -> CARTESIAN.wkbToWkt(((BytesRefBlock) block).getBytesRef(offset, scratch)); + case "geo_point", "geo_shape" -> GEO.wkbToWkt(((BytesRefBlock) block).getBytesRef(offset, scratch)); + case "cartesian_point", "cartesian_shape" -> CARTESIAN.wkbToWkt(((BytesRefBlock) block).getBytesRef(offset, scratch)); case "unsupported" -> UnsupportedValueSource.UNSUPPORTED_OUTPUT; case "_source" -> { BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); @@ -161,12 +161,12 @@ static Page valuesToPage(BlockFactory blockFactory, List columns, Li throw new UncheckedIOException(e); } } - case "geo_point" -> { + case "geo_point", "geo_shape" -> { // This just converts WKT to WKB, so does not need CRS knowledge, we could merge GEO and CARTESIAN here BytesRef wkb = GEO.wktToWkb(value.toString()); ((BytesRefBlock.Builder) builder).appendBytesRef(wkb); } - case "cartesian_point" -> { + case "cartesian_point", "cartesian_shape" -> { // This just converts WKT to WKB, so does not need CRS knowledge, we could merge GEO and CARTESIAN here BytesRef wkb = CARTESIAN.wktToWkb(value.toString()); ((BytesRefBlock.Builder) builder).appendBytesRef(wkb); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java index e28e6beebabed..ca40faff81c55 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java @@ -23,10 +23,12 @@ */ final class ResponseXContentUtils { - /** Returns the column headings for the given columns. */ - static Iterator columnHeadings(List columns) { + /** + * Returns the column headings for the given columns. 
+ */ + static Iterator allColumns(List columns, String name) { return ChunkedToXContentHelper.singleChunk((builder, params) -> { - builder.startArray("columns"); + builder.startArray(name); for (ColumnInfo col : columns) { col.toXContent(builder, params); } @@ -34,43 +36,62 @@ static Iterator columnHeadings(List columns) { }); } + /** + * Returns the column headings for the given columns, moving the heading + * for always-null columns to a {@code null_columns} section. + */ + static Iterator nonNullColumns(List columns, boolean[] nullColumns, String name) { + return ChunkedToXContentHelper.singleChunk((builder, params) -> { + builder.startArray(name); + for (int c = 0; c < columns.size(); c++) { + if (nullColumns[c] == false) { + columns.get(c).toXContent(builder, params); + } + } + return builder.endArray(); + }); + } + /** Returns the column values for the given pages (described by the column infos). */ - static Iterator columnValues(List columns, List pages, boolean columnar) { + static Iterator columnValues( + List columns, + List pages, + boolean columnar, + boolean[] nullColumns + ) { if (pages.isEmpty()) { return Collections.emptyIterator(); } else if (columnar) { - return columnarValues(columns, pages); + return columnarValues(columns, pages, nullColumns); } else { - return rowValues(columns, pages); + return rowValues(columns, pages, nullColumns); } } /** Returns a columnar based representation of the values in the given pages (described by the column infos). */ - static Iterator columnarValues(List columns, List pages) { + static Iterator columnarValues(List columns, List pages, boolean[] nullColumns) { final BytesRef scratch = new BytesRef(); - return Iterators.flatMap( - Iterators.forRange( - 0, - columns.size(), - column -> Iterators.concat( - Iterators.single(((builder, params) -> builder.startArray())), - Iterators.flatMap(pages.iterator(), page -> { - ColumnInfo.PositionToXContent toXContent = columns.get(column).positionToXContent(page.getBlock(column), scratch); - return Iterators.forRange( - 0, - page.getPositionCount(), - position -> (builder, params) -> toXContent.positionToXContent(builder, params, position) - ); - }), - ChunkedToXContentHelper.endArray() - ) - ), - Function.identity() - ); + return Iterators.flatMap(Iterators.forRange(0, columns.size(), column -> { + if (nullColumns != null && nullColumns[column]) { + return Collections.emptyIterator(); + } + return Iterators.concat( + Iterators.single(((builder, params) -> builder.startArray())), + Iterators.flatMap(pages.iterator(), page -> { + ColumnInfo.PositionToXContent toXContent = columns.get(column).positionToXContent(page.getBlock(column), scratch); + return Iterators.forRange( + 0, + page.getPositionCount(), + position -> (builder, params) -> toXContent.positionToXContent(builder, params, position) + ); + }), + ChunkedToXContentHelper.endArray() + ); + }), Function.identity()); } /** Returns a row based representation of the values in the given pages (described by the column infos). */ - static Iterator rowValues(List columns, List pages) { + static Iterator rowValues(List columns, List pages, boolean[] nullColumns) { final BytesRef scratch = new BytesRef(); return Iterators.flatMap(pages.iterator(), page -> { final int columnCount = columns.size(); @@ -82,7 +103,9 @@ static Iterator rowValues(List columns, List

    (builder, params) -> { builder.startArray(); for (int c = 0; c < columnCount; c++) { - toXContents[c].positionToXContent(builder, params, position); + if (nullColumns == null || nullColumns[c] == false) { + toXContents[c].positionToXContent(builder, params, position); + } } return builder.endArray(); }); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java index 3dea461ccf8b7..0b2bad2eb22d3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java @@ -18,11 +18,11 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xpack.esql.action.EsqlQueryResponse.DROP_NULL_COLUMNS_OPTION; import static org.elasticsearch.xpack.esql.formatter.TextFormat.URL_PARAM_DELIMITER; @ServerlessScope(Scope.PUBLIC) @@ -60,6 +60,6 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli @Override protected Set responseParams() { - return Collections.singleton(URL_PARAM_DELIMITER); + return Set.of(URL_PARAM_DELIMITER, DROP_NULL_COLUMNS_OPTION); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java index 35a679e23d1f7..b5a1821350e5e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java @@ -16,8 +16,11 @@ import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.xpack.esql.action.EsqlQueryResponse.DROP_NULL_COLUMNS_OPTION; +import static org.elasticsearch.xpack.esql.formatter.TextFormat.URL_PARAM_DELIMITER; @ServerlessScope(Scope.PUBLIC) public class RestEsqlGetAsyncResultAction extends BaseRestHandler { @@ -42,4 +45,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } return channel -> client.execute(EsqlAsyncGetResultAction.INSTANCE, get, new RestRefCountedChunkedToXContentListener<>(channel)); } + + @Override + protected Set responseParams() { + return Set.of(URL_PARAM_DELIMITER, DROP_NULL_COLUMNS_OPTION); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java index 6b8e7fc397865..070c0e112e051 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java @@ -19,7 +19,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Set; @@ -65,6 +64,6 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli @Override protected Set responseParams() { - 
return Collections.singleton(URL_PARAM_DELIMITER);
+        return Set.of(URL_PARAM_DELIMITER, EsqlQueryResponse.DROP_NULL_COLUMNS_OPTION);
     }
 }
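
The three REST handlers above only expose the new drop_null_columns response parameter; the detection itself is EsqlQueryResponse.nullColumns() earlier in this diff, which drops a column only when every value in every page is null. A minimal standalone sketch of that rule (plain arrays stand in for Block and Page, which is an illustrative simplification; the real code asks Block.areAllValuesNull() per page):

    // Sketch: pages[p][c][r] is the value of column c at row r of page p (null = missing).
    static boolean[] nullColumns(Integer[][][] pages, int columnCount) {
        boolean[] nullColumns = new boolean[columnCount];
        for (int c = 0; c < columnCount; c++) {
            boolean allNull = true;
            for (Integer[][] page : pages) {
                for (Integer value : page[c]) {
                    if (value != null) {
                        allNull = false; // one non-null value anywhere keeps the column
                    }
                }
            }
            nullColumns[c] = allNull;
        }
        return nullColumns;
    }
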
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java
index af5a0bd3f0b70..e0a36c8d81e82 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java
@@ -12,7 +12,6 @@ import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.xpack.core.enrich.EnrichPolicy;
-import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution;
 import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute;
 import org.elasticsearch.xpack.esql.plan.logical.Drop;
 import org.elasticsearch.xpack.esql.plan.logical.Enrich;
@@ -43,7 +42,6 @@ import org.elasticsearch.xpack.ql.expression.function.UnresolvedFunction;
 import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison;
 import org.elasticsearch.xpack.ql.index.EsIndex;
-import org.elasticsearch.xpack.ql.index.IndexResolution;
 import org.elasticsearch.xpack.ql.plan.TableIdentifier;
 import org.elasticsearch.xpack.ql.plan.logical.Aggregate;
 import org.elasticsearch.xpack.ql.plan.logical.EsRelation;
@@ -209,52 +207,35 @@ protected LogicalPlan rule(Enrich plan, AnalyzerContext context) {
                 // the policy does not exist
                 return plan;
             }
-            String policyName = (String) plan.policyName().fold();
-            EnrichPolicyResolution policyRes = context.enrichResolution()
-                .resolvedPolicies()
-                .stream()
-                .filter(x -> x.policyName().equals(policyName))
-                .findFirst()
-                .orElse(new EnrichPolicyResolution(policyName, null, null));
-
-            IndexResolution idx = policyRes.index();
-            EnrichPolicy policy = policyRes.policy();
-
-            var policyNameExp = policy == null || idx == null
-                ? new UnresolvedAttribute(
-                    plan.policyName().source(),
-                    policyName,
-                    null,
-                    unresolvedPolicyError(policyName, context.enrichResolution())
-                )
-                : plan.policyName();
-
-            var matchField = policy != null && (plan.matchField() == null || plan.matchField() instanceof EmptyAttribute)
-                ? new UnresolvedAttribute(plan.source(), policy.getMatchField())
-                : plan.matchField();
-
-            List enrichFields = policy == null || idx == null
-                ? (plan.enrichFields() == null ? List.of() : plan.enrichFields())
-                : calculateEnrichFields(
+            final String policyName = (String) plan.policyName().fold();
+            final EnrichResolution.ResolvedPolicy resolvedPolicy = context.enrichResolution().getResolvedPolicy(policyName);
+            if (resolvedPolicy != null) {
+                EnrichPolicy policy = resolvedPolicy.policy();
+                var matchField = plan.matchField() == null || plan.matchField() instanceof EmptyAttribute
+                    ? new UnresolvedAttribute(plan.source(), policy.getMatchField())
+                    : plan.matchField();
+                List enrichFields = calculateEnrichFields(
                     plan.source(),
                     policyName,
-                    mappingAsAttributes(plan.source(), idx.get().mapping()),
+                    mappingAsAttributes(plan.source(), resolvedPolicy.mapping()),
                     plan.enrichFields(),
                     policy
                 );
-
-            return new Enrich(plan.source(), plan.child(), plan.mode(), policyNameExp, matchField, policyRes, enrichFields);
-        }
-
-        private String unresolvedPolicyError(String policyName, EnrichResolution enrichResolution) {
-            List potentialMatches = StringUtils.findSimilar(policyName, enrichResolution.existingPolicies());
-            String msg = "unresolved enrich policy [" + policyName + "]";
-            if (CollectionUtils.isEmpty(potentialMatches) == false) {
-                msg += ", did you mean "
-                    + (potentialMatches.size() == 1 ? "[" + potentialMatches.get(0) + "]" : "any of " + potentialMatches)
-                    + "?";
+                return new Enrich(
+                    plan.source(),
+                    plan.child(),
+                    plan.mode(),
+                    plan.policyName(),
+                    matchField,
+                    policy,
+                    resolvedPolicy.concreteIndices(),
+                    enrichFields
+                );
+            } else {
+                String error = context.enrichResolution().getError(policyName);
+                var policyNameExp = new UnresolvedAttribute(plan.policyName().source(), policyName, null, error);
+                return new Enrich(plan.source(), plan.child(), plan.mode(), policyNameExp, plan.matchField(), null, Map.of(), List.of());
             }
-            return msg;
         }
 
         public static List calculateEnrichFields(
@@ -589,6 +570,7 @@ private LogicalPlan resolveEnrich(Enrich enrich, List childrenOutput)
                 enrich.policyName(),
                 resolved,
                 enrich.policy(),
+                enrich.concreteIndices(),
                 enrich.enrichFields()
             );
         }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/EnrichResolution.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/EnrichResolution.java
index 332e5e60565b6..deb683a94a8f1 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/EnrichResolution.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/EnrichResolution.java
@@ -7,8 +7,73 @@
 
 package org.elasticsearch.xpack.esql.analysis;
 
-import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.xpack.core.enrich.EnrichPolicy;
+import org.elasticsearch.xpack.ql.type.EsField;
+import org.elasticsearch.xpack.ql.util.CollectionUtils;
+import org.elasticsearch.xpack.ql.util.StringUtils;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
-public record EnrichResolution(Set resolvedPolicies, Set existingPolicies) {}
+/**
+ * Holds the resolution results of the enrich policies.
+ * The results and errors are collected via {@link #addResolvedPolicy} and {@link #addError},
+ * and can be retrieved via {@link #getResolvedPolicy} and {@link #getError}.
+ */
+public final class EnrichResolution {
+
+    private final Map resolvedPolicies = ConcurrentCollections.newConcurrentMap(); // policy name -> resolved policy
+    private final Map errors = ConcurrentCollections.newConcurrentMap(); // policy name -> error
+    private final Set existingPolicies = ConcurrentCollections.newConcurrentSet(); // for suggestion
+
+    public ResolvedPolicy getResolvedPolicy(String policyName) {
+        return resolvedPolicies.get(policyName);
+    }
+
+    public Collection resolvedEnrichPolicies() {
+        return resolvedPolicies.values().stream().map(r -> r.policy).toList();
+    }
+
+    public String getError(String policyName) {
+        final String error = errors.get(policyName);
+        if (error != null) {
+            return error;
+        }
+        return notFoundError(policyName);
+    }
+
+    public void addResolvedPolicy(
+        String policyName,
+        EnrichPolicy policy,
+        Map concreteIndices,
+        Map mapping
+    ) {
+        resolvedPolicies.put(policyName, new ResolvedPolicy(policy, concreteIndices, mapping));
+    }
+
+    public void addError(String policyName, String reason) {
+        errors.put(policyName, reason);
+    }
+
+    public void addExistingPolicies(Set policyNames) {
+        existingPolicies.addAll(policyNames);
+    }
+
+    private String notFoundError(String policyName) {
+        List potentialMatches = StringUtils.findSimilar(policyName, existingPolicies);
+        String msg = "unresolved enrich policy [" + policyName + "]";
+        if (CollectionUtils.isEmpty(potentialMatches) == false) {
+            msg += ", did you mean "
+                + (potentialMatches.size() == 1 ? "[" + potentialMatches.get(0) + "]" : "any of " + potentialMatches)
+                + "?";
+        }
+        return msg;
+    }
+
+    public record ResolvedPolicy(EnrichPolicy policy, Map concreteIndices, Map mapping) {
+
+    }
+}
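
For orientation, a usage sketch of the new EnrichResolution contract (policy names, index name, and the policy/mapping values are hypothetical placeholders; the empty-string key is assumed to be the local-cluster group key used by EnrichPolicyResolver below):

    // Producer side (the resolver): record successes, failures, and all known names.
    EnrichResolution resolution = new EnrichResolution();
    resolution.addExistingPolicies(Set.of("clients", "hosts"));   // feeds the "did you mean" hint
    resolution.addResolvedPolicy("clients", clientsPolicy, Map.of("", ".enrich-clients-000001"), clientsMapping);
    resolution.addError("hosts", "cannot resolve enrich index");  // hypothetical failure reason

    // Consumer side (the analyzer): a null ResolvedPolicy means getError(...) has the reason.
    EnrichResolution.ResolvedPolicy resolved = resolution.getResolvedPolicy("clients");
    String error = resolution.getError("unknown");                // -> "unresolved enrich policy [unknown]..."
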
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java
index d38dd57ff6aa8..f28f8f1b4ab19 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java
@@ -21,11 +21,8 @@ import org.elasticsearch.xpack.ql.common.Failure;
 import org.elasticsearch.xpack.ql.expression.Alias;
 import org.elasticsearch.xpack.ql.expression.Expression;
-import org.elasticsearch.xpack.ql.expression.FieldAttribute;
-import org.elasticsearch.xpack.ql.expression.Literal;
-import org.elasticsearch.xpack.ql.expression.MetadataAttribute;
+import org.elasticsearch.xpack.ql.expression.Expressions;
 import org.elasticsearch.xpack.ql.expression.NamedExpression;
-import org.elasticsearch.xpack.ql.expression.ReferenceAttribute;
 import org.elasticsearch.xpack.ql.expression.TypeResolutions;
 import org.elasticsearch.xpack.ql.expression.UnresolvedAttribute;
 import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction;
@@ -150,36 +147,39 @@ else if (p.resolved()) {
     private static void checkAggregate(LogicalPlan p, Set failures) {
         if (p instanceof Aggregate agg) {
+            // check aggregates
             agg.aggregates().forEach(e -> {
-                var exp = e instanceof Alias ? ((Alias) e).child() : e;
+                var exp = e instanceof Alias a ? a.child() : e;
+                if (exp instanceof AggregateFunction af) {
+                    af.field().forEachDown(AggregateFunction.class, f -> {
+                        failures.add(fail(f, "nested aggregations [{}] not allowed inside other aggregations [{}]", f, af));
+                    });
+                } else {
+                    if (Expressions.match(agg.groupings(), g -> {
+                        Expression to = g instanceof Alias al ? al.child() : g;
+                        return to.semanticEquals(exp);
+                    }) == false) {
                         failures.add(
                             fail(
-                                e,
-                                "aggregate function's field must be an attribute or literal; found ["
-                                    + field.sourceText()
+                                exp,
+                                "expected an aggregate function or group but got ["
+                                    + exp.sourceText()
                                     + "] of type ["
-                                    + field.nodeName()
+                                    + exp.nodeName()
                                     + "]"
                             )
                         );
                     }
-                } else if (agg.groupings().contains(exp) == false) { // TODO: allow an expression?
-                    failures.add(
-                        fail(
-                            exp,
-                            "expected an aggregate function or group but got [" + exp.sourceText() + "] of type [" + exp.nodeName() + "]"
-                        )
-                    );
                 }
             });
+
+            // check grouping
+            // the grouping cannot be an aggregate function
+            agg.groupings().forEach(e -> e.forEachUp(g -> {
+                if (g instanceof AggregateFunction af) {
+                    failures.add(fail(g, "cannot use an aggregate [{}] for grouping", af));
+                }
+            }));
         }
     }
 
@@ -214,12 +214,17 @@ private static void checkRow(LogicalPlan p, Set failures) {
     private static void checkEvalFields(LogicalPlan p, Set failures) {
         if (p instanceof Eval eval) {
             eval.fields().forEach(field -> {
+                // check supported types
                 DataType dataType = field.dataType();
                 if (EsqlDataTypes.isRepresentable(dataType) == false) {
                     failures.add(
                         fail(field, "EVAL does not support type [{}] in expression [{}]", dataType.typeName(), field.child().sourceText())
                     );
                 }
+                // check no aggregate functions are used
+                field.forEachDown(AggregateFunction.class, af -> {
+                    failures.add(fail(af, "aggregate function [{}] not allowed outside STATS command", af.sourceText()));
+                });
             });
         }
     }
 
@@ -279,7 +284,9 @@ public static Failure validateBinaryComparison(BinaryComparison bc) {
         allowed.add(DataTypes.DATETIME);
         allowed.add(DataTypes.VERSION);
         allowed.add(EsqlDataTypes.GEO_POINT);
+        allowed.add(EsqlDataTypes.GEO_SHAPE);
         allowed.add(EsqlDataTypes.CARTESIAN_POINT);
+        allowed.add(EsqlDataTypes.CARTESIAN_SHAPE);
         if (bc instanceof Equals || bc instanceof NotEquals) {
             allowed.add(DataTypes.BOOLEAN);
         }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolution.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolution.java
deleted file mode 100644
index 5014fe1fcd1df..0000000000000
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolution.java
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */ - -package org.elasticsearch.xpack.esql.enrich; - -import org.elasticsearch.xpack.core.enrich.EnrichPolicy; -import org.elasticsearch.xpack.ql.index.IndexResolution; - -public record EnrichPolicyResolution(String policyName, EnrichPolicy policy, IndexResolution index) {} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index 1e21886a7ac4b..d5783e5ef0100 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -11,12 +11,16 @@ import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; @@ -25,10 +29,14 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.enrich.EnrichMetadata; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; +import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.session.EsqlSession; +import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.index.IndexResolver; +import java.util.Collection; +import java.util.List; import java.util.Map; import java.util.Set; @@ -48,36 +56,69 @@ public EnrichPolicyResolver(ClusterService clusterService, TransportService tran transportService.registerRequestHandler( RESOLVE_ACTION_NAME, threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME), - ResolveRequest::new, + LookupRequest::new, new RequestHandler() ); } - public void resolvePolicy(String policyName, ActionListener listener) { + public void resolvePolicy(Collection policyNames, ActionListener listener) { + if (policyNames.isEmpty()) { + listener.onResponse(new EnrichResolution()); + return; + } transportService.sendRequest( clusterService.localNode(), RESOLVE_ACTION_NAME, - new ResolveRequest(policyName), - new ActionListenerResponseHandler<>( - listener.map(r -> r.resolution), - ResolveResponse::new, - threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME) - ) + new LookupRequest(policyNames), + new ActionListenerResponseHandler<>(listener.delegateFailureAndWrap((l, lookup) -> { + final EnrichResolution resolution = new EnrichResolution(); + resolution.addExistingPolicies(lookup.allPolicies); + try (RefCountingListener refs = new RefCountingListener(l.map(unused -> resolution))) { + for (Map.Entry e : lookup.policies.entrySet()) { + resolveOnePolicy(e.getKey(), e.getValue(), resolution, refs.acquire()); + } + } + }), LookupResponse::new, 
threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME)) ); } + private void resolveOnePolicy(String policyName, EnrichPolicy policy, EnrichResolution resolution, ActionListener listener) { + ThreadContext threadContext = threadPool.getThreadContext(); + listener = ContextPreservingActionListener.wrapPreservingContext(listener, threadContext); + try (ThreadContext.StoredContext ignored = threadContext.stashWithOrigin(ClientHelper.ENRICH_ORIGIN)) { + indexResolver.resolveAsMergedMapping( + EnrichPolicy.getBaseName(policyName), + IndexResolver.ALL_FIELDS, + false, + Map.of(), + listener.map(indexResult -> { + if (indexResult.isValid()) { + EsIndex esIndex = indexResult.get(); + Set indices = esIndex.concreteIndices(); + var concreteIndices = Map.of(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY, Iterables.get(indices, 0)); + resolution.addResolvedPolicy(policyName, policy, concreteIndices, esIndex.mapping()); + } else { + resolution.addError(policyName, indexResult.toString()); + } + return null; + }), + EsqlSession::specificValidity + ); + } + } + private static UnsupportedOperationException unsupported() { return new UnsupportedOperationException("local node transport action"); } - private static class ResolveRequest extends TransportRequest { - private final String policyName; + private static class LookupRequest extends TransportRequest { + private final Collection policyNames; - ResolveRequest(String policyName) { - this.policyName = policyName; + LookupRequest(Collection policyNames) { + this.policyNames = policyNames; } - ResolveRequest(StreamInput in) { + LookupRequest(StreamInput in) { throw unsupported(); } @@ -87,14 +128,16 @@ public void writeTo(StreamOutput out) { } } - private static class ResolveResponse extends TransportResponse { - private final EnrichPolicyResolution resolution; + private static class LookupResponse extends TransportResponse { + final Map policies; + final Set allPolicies; - ResolveResponse(EnrichPolicyResolution resolution) { - this.resolution = resolution; + LookupResponse(Map policies, Set allPolicies) { + this.policies = policies; + this.allPolicies = allPolicies; } - ResolveResponse(StreamInput in) { + LookupResponse(StreamInput in) { throw unsupported(); } @@ -104,38 +147,19 @@ public void writeTo(StreamOutput out) { } } - private class RequestHandler implements TransportRequestHandler { + private class RequestHandler implements TransportRequestHandler { @Override - public void messageReceived(ResolveRequest request, TransportChannel channel, Task task) throws Exception { - String policyName = request.policyName; - EnrichPolicy policy = policies().get(policyName); - ThreadContext threadContext = threadPool.getThreadContext(); - ActionListener listener = new ChannelActionListener<>(channel); - listener = ContextPreservingActionListener.wrapPreservingContext(listener, threadContext); - try (ThreadContext.StoredContext ignored = threadContext.stashWithOrigin(ClientHelper.ENRICH_ORIGIN)) { - indexResolver.resolveAsMergedMapping( - EnrichPolicy.getBaseName(policyName), - IndexResolver.ALL_FIELDS, - false, - Map.of(), - listener.map(indexResult -> new ResolveResponse(new EnrichPolicyResolution(policyName, policy, indexResult))), - EsqlSession::specificValidity - ); + public void messageReceived(LookupRequest request, TransportChannel channel, Task task) throws Exception { + final EnrichMetadata metadata = clusterService.state().metadata().custom(EnrichMetadata.TYPE); + final Map policies = metadata == null ? 
Map.of() : metadata.getPolicies(); + final Map results = Maps.newMapWithExpectedSize(request.policyNames.size()); + for (String policyName : request.policyNames) { + EnrichPolicy p = policies.get(policyName); + if (p != null) { + results.put(policyName, new EnrichPolicy(p.getType(), null, List.of(), p.getMatchField(), p.getEnrichFields())); + } } + new ChannelActionListener<>(channel).onResponse(new LookupResponse(results, policies.keySet())); } } - - public Set allPolicyNames() { - // TODO: remove this suggestion as it exposes policy names without the right permission - return policies().keySet(); - } - - private Map policies() { - if (clusterService == null || clusterService.state() == null) { - return Map.of(); - } - EnrichMetadata metadata = clusterService.state().metadata().custom(EnrichMetadata.TYPE); - return metadata == null ? Map.of() : metadata.getPolicies(); - } - } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java index 7f5a6079cc6d7..85b30032c1070 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java @@ -163,10 +163,7 @@ public final ExpressionEvaluator.Factory map(BinaryComparison bc, Layout layout) if (leftType == DataTypes.DATETIME) { return longs.apply(bc.source(), leftEval, rightEval); } - if (leftType == EsqlDataTypes.GEO_POINT) { - return geometries.apply(bc.source(), leftEval, rightEval, leftType); - } - if (leftType == EsqlDataTypes.CARTESIAN_POINT) { + if (EsqlDataTypes.isSpatial(leftType)) { return geometries.apply(bc.source(), leftEval, rightEval, leftType); } throw new EsqlIllegalArgumentException("resolved type for [" + bc + "] but didn't implement mapping"); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index f8d9bfbc160a8..b3229f1c36c2b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -21,10 +21,12 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Least; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianShape; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDatetime; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDegrees; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoPoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoShape; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToIP; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; 
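
For reference, the ComparisonMapper hunk above collapses the two point-type checks into a single EsqlDataTypes.isSpatial test. A sketch of what such a predicate is expected to cover, assuming it is exactly the union of the four spatial ESQL types touched by this change (the shipped implementation may differ):

    // Sketch only; assumes static imports of the spatial constants from EsqlDataTypes.
    public static boolean isSpatial(DataType t) {
        return t == GEO_POINT || t == CARTESIAN_POINT || t == GEO_SHAPE || t == CARTESIAN_SHAPE;
    }
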
@@ -177,10 +179,12 @@ private FunctionDefinition[][] functions() { new FunctionDefinition[] { def(ToBoolean.class, ToBoolean::new, "to_boolean", "to_bool"), def(ToCartesianPoint.class, ToCartesianPoint::new, "to_cartesianpoint"), + def(ToCartesianShape.class, ToCartesianShape::new, "to_cartesianshape"), def(ToDatetime.class, ToDatetime::new, "to_datetime", "to_dt"), def(ToDegrees.class, ToDegrees::new, "to_degrees"), def(ToDouble.class, ToDouble::new, "to_double", "to_dbl"), def(ToGeoPoint.class, ToGeoPoint::new, "to_geopoint"), + def(ToGeoShape.class, ToGeoShape::new, "to_geoshape"), def(ToIP.class, ToIP::new, "to_ip"), def(ToInteger.class, ToInteger::new, "to_integer", "to_int"), def(ToLong.class, ToLong::new, "to_long"), @@ -260,7 +264,7 @@ public static FunctionDescription description(FunctionDefinition def) { } Constructor constructor = constructors[0]; FunctionInfo functionInfo = constructor.getAnnotation(FunctionInfo.class); - String functionDescription = functionInfo == null ? "" : functionInfo.description().replaceAll(System.lineSeparator(), " "); + String functionDescription = functionInfo == null ? "" : functionInfo.description().replaceAll("\n", " "); String[] returnType = functionInfo == null ? new String[] { "?" } : functionInfo.returnType(); var params = constructor.getParameters(); // no multiple c'tors supported @@ -273,7 +277,7 @@ public static FunctionDescription description(FunctionDefinition def) { String name = paramInfo == null ? params[i].getName() : paramInfo.name(); variadic |= List.class.isAssignableFrom(params[i].getType()); String[] type = paramInfo == null ? new String[] { "?" } : paramInfo.type(); - String desc = paramInfo == null ? "" : paramInfo.description().replaceAll(System.lineSeparator(), " "); + String desc = paramInfo == null ? "" : paramInfo.description().replaceAll("\n", " "); boolean optional = paramInfo == null ? false : paramInfo.optional(); args.add(new EsqlFunctionRegistry.ArgSignature(name, type, desc, optional)); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java new file mode 100644 index 0000000000000..64db9c6f015ed --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_SHAPE; +import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; +import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; + +public class ToCartesianShape extends AbstractConvertFunction { + + private static final Map EVALUATORS = Map.ofEntries( + Map.entry(CARTESIAN_SHAPE, (fieldEval, source) -> fieldEval), + Map.entry(KEYWORD, ToCartesianShapeFromStringEvaluator.Factory::new), + Map.entry(TEXT, ToCartesianShapeFromStringEvaluator.Factory::new) + ); + + @FunctionInfo(returnType = "cartesian_shape", description = "Converts an input value to a shape value.") + public ToCartesianShape(Source source, @Param(name = "v", type = { "cartesian_shape", "keyword", "text" }) Expression field) { + super(source, field); + } + + @Override + protected Map factories() { + return EVALUATORS; + } + + @Override + public DataType dataType() { + return CARTESIAN_SHAPE; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new ToCartesianShape(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ToCartesianShape::new, field()); + } + + @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class }) + static BytesRef fromKeyword(BytesRef in) { + return CARTESIAN.wktToWkb(in.utf8ToString()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java new file mode 100644 index 0000000000000..075c5e753d76f --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_SHAPE; +import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; +import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; + +public class ToGeoShape extends AbstractConvertFunction { + + private static final Map EVALUATORS = Map.ofEntries( + Map.entry(GEO_SHAPE, (fieldEval, source) -> fieldEval), + Map.entry(KEYWORD, ToGeoShapeFromStringEvaluator.Factory::new), + Map.entry(TEXT, ToGeoShapeFromStringEvaluator.Factory::new) + ); + + @FunctionInfo(returnType = "geo_shape", description = "Converts an input value to a geo_shape value.") + public ToGeoShape(Source source, @Param(name = "v", type = { "geo_shape", "keyword", "text" }) Expression field) { + super(source, field); + } + + @Override + protected Map factories() { + return EVALUATORS; + } + + @Override + public DataType dataType() { + return GEO_SHAPE; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new ToGeoShape(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ToGeoShape::new, field()); + } + + @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class }) + static BytesRef fromKeyword(BytesRef in) { + return GEO.wktToWkb(in.utf8ToString()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java index e157f508f9466..688996dd1db00 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java @@ -23,7 +23,9 @@ import java.util.Map; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_POINT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_SHAPE; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_POINT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_SHAPE; import static org.elasticsearch.xpack.ql.type.DataTypes.BOOLEAN; import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; @@ -53,7 +55,9 @@ public class ToString extends AbstractConvertFunction implements EvaluatorMapper Map.entry(VERSION, ToStringFromVersionEvaluator.Factory::new), Map.entry(UNSIGNED_LONG, ToStringFromUnsignedLongEvaluator.Factory::new), Map.entry(GEO_POINT, ToStringFromGeoPointEvaluator.Factory::new), - Map.entry(CARTESIAN_POINT, ToStringFromCartesianPointEvaluator.Factory::new) + Map.entry(CARTESIAN_POINT, ToStringFromCartesianPointEvaluator.Factory::new), + Map.entry(CARTESIAN_SHAPE, 
ToStringFromCartesianShapeEvaluator.Factory::new),
+        Map.entry(GEO_SHAPE, ToStringFromGeoShapeEvaluator.Factory::new)
     );
 
     @FunctionInfo(returnType = "keyword", description = "Converts a field into a string.")
@@ -64,9 +68,11 @@ public ToString(
             type = {
                 "boolean",
                 "cartesian_point",
+                "cartesian_shape",
                 "date",
                 "double",
                 "geo_point",
+                "geo_shape",
                 "integer",
                 "ip",
                 "keyword",
@@ -148,4 +154,14 @@ static BytesRef fromGeoPoint(BytesRef wkb) {
     static BytesRef fromCartesianPoint(BytesRef wkb) {
         return new BytesRef(CARTESIAN.wkbToWkt(wkb));
     }
+
+    @ConvertEvaluator(extraName = "FromCartesianShape")
+    static BytesRef fromCartesianShape(BytesRef wkb) {
+        return new BytesRef(CARTESIAN.wkbToWkt(wkb));
+    }
+
+    @ConvertEvaluator(extraName = "FromGeoShape")
+    static BytesRef fromGeoShape(BytesRef wkb) {
+        return new BytesRef(GEO.wkbToWkt(wkb));
+    }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java
index 29350203a966d..4fa89e66982e4 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java
@@ -39,9 +39,11 @@ public MvCount(
             type = {
                 "boolean",
                 "cartesian_point",
+                "cartesian_shape",
                 "date",
                 "double",
                 "geo_point",
+                "geo_shape",
                 "integer",
                 "ip",
                 "keyword",
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java
index 2bc8314959995..0f6bd847d68ed 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java
@@ -37,9 +37,11 @@ public class MvFirst extends AbstractMultivalueFunction {
         returnType = {
             "boolean",
             "cartesian_point",
+            "cartesian_shape",
             "date",
             "double",
             "geo_point",
+            "geo_shape",
             "integer",
             "ip",
             "keyword",
@@ -56,9 +58,11 @@ public MvFirst(
             type = {
                 "boolean",
                 "cartesian_point",
+                "cartesian_shape",
                 "date",
                 "double",
                 "geo_point",
+                "geo_shape",
                 "integer",
                 "ip",
                 "keyword",
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java
index aad003a649cca..2881854d17f6f 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java
@@ -37,9 +37,11 @@ public class MvLast extends AbstractMultivalueFunction {
         returnType = {
             "boolean",
             "cartesian_point",
+            "cartesian_shape",
             "date",
             "double",
             "geo_point",
+            "geo_shape",
             "integer",
             "ip",
             "keyword",
@@ -56,9 +58,11 @@ public MvLast(
             type = {
                 "boolean",
                 "cartesian_point",
+                "cartesian_shape",
                 "date",
                 "double",
                 "geo_point",
+                "geo_shape",
                 "integer",
                 "ip",
                 "keyword",
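
The new shape evaluators mirror the existing point ones: conversion to and from strings is a WKT/WKB translation on the matching coordinate system (GEO for geo_shape, CARTESIAN for cartesian_shape). A short sketch of the round trip these helpers perform, reusing the SpatialCoordinateTypes utilities already statically imported above (the WKT literal is illustrative):

    // WKT string -> WKB bytes (the storage/wire form) -> WKT string (the display form).
    BytesRef wkb = GEO.wktToWkb("POINT (10.0 20.0)");   // as in ToGeoShape.fromKeyword
    String wkt = GEO.wkbToWkt(wkb);                     // as in ToString.fromGeoShape
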
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
index b6dce816db218..f7f7ecd0118dd 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
@@ -12,10 +12,13 @@ import org.elasticsearch.common.TriFunction;
 import org.elasticsearch.common.io.stream.NamedWriteable;
 import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.util.iterable.Iterables;
 import org.elasticsearch.dissect.DissectParser;
 import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.transport.RemoteClusterAware;
 import org.elasticsearch.xpack.core.enrich.EnrichPolicy;
-import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution;
 import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals;
 import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan;
 import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual;
@@ -40,10 +43,12 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Least;
 import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean;
 import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint;
+import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianShape;
 import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDatetime;
 import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDegrees;
 import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble;
 import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoPoint;
+import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoShape;
 import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToIP;
 import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger;
 import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong;
@@ -167,7 +172,6 @@ import org.elasticsearch.xpack.ql.expression.predicate.regex.RegexMatch;
 import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardPattern;
 import org.elasticsearch.xpack.ql.index.EsIndex;
-import org.elasticsearch.xpack.ql.index.IndexResolution;
 import org.elasticsearch.xpack.ql.plan.logical.Aggregate;
 import org.elasticsearch.xpack.ql.plan.logical.EsRelation;
 import org.elasticsearch.xpack.ql.plan.logical.Filter;
@@ -333,6 +337,8 @@ public static List namedTypeEntries() {
             of(ESQL_UNARY_SCLR_CLS, ToDatetime.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
             of(ESQL_UNARY_SCLR_CLS, ToDegrees.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
             of(ESQL_UNARY_SCLR_CLS, ToDouble.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
+            of(ESQL_UNARY_SCLR_CLS, ToGeoShape.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
+            of(ESQL_UNARY_SCLR_CLS, ToCartesianShape.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
             of(ESQL_UNARY_SCLR_CLS, ToGeoPoint.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
             of(ESQL_UNARY_SCLR_CLS, ToIP.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
             of(ESQL_UNARY_SCLR_CLS, ToInteger.class, PlanNamedTypes::writeESQLUnaryScalar,
PlanNamedTypes::readESQLUnaryScalar), @@ -482,15 +488,25 @@ static void writeEvalExec(PlanStreamOutput out, EvalExec evalExec) throws IOExce } static EnrichExec readEnrichExec(PlanStreamInput in) throws IOException { - return new EnrichExec( - in.readSource(), - in.readPhysicalPlanNode(), - in.readNamedExpression(), - in.readString(), - in.readString(), - readEsIndex(in), - readNamedExpressions(in) - ); + final Source source = in.readSource(); + final PhysicalPlan child = in.readPhysicalPlanNode(); + final NamedExpression matchField = in.readNamedExpression(); + final String policyName = in.readString(); + final String policyMatchField = in.readString(); + final Map concreteIndices; + final Enrich.Mode mode; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { + mode = in.readEnum(Enrich.Mode.class); + concreteIndices = in.readMap(StreamInput::readString, StreamInput::readString); + } else { + mode = Enrich.Mode.ANY; + EsIndex esIndex = readEsIndex(in); + if (esIndex.concreteIndices().size() != 1) { + throw new IllegalStateException("expected a single concrete enrich index; got " + esIndex.concreteIndices()); + } + concreteIndices = Map.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, Iterables.get(esIndex.concreteIndices(), 0)); + } + return new EnrichExec(source, child, mode, matchField, policyName, policyMatchField, concreteIndices, readNamedExpressions(in)); } static void writeEnrichExec(PlanStreamOutput out, EnrichExec enrich) throws IOException { @@ -499,7 +515,17 @@ static void writeEnrichExec(PlanStreamOutput out, EnrichExec enrich) throws IOEx out.writeNamedExpression(enrich.matchField()); out.writeString(enrich.policyName()); out.writeString(enrich.policyMatchField()); - writeEsIndex(out, enrich.enrichIndex()); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { + out.writeEnum(enrich.mode()); + out.writeMap(enrich.concreteIndices(), StreamOutput::writeString, StreamOutput::writeString); + } else { + if (enrich.concreteIndices().keySet().equals(Set.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY))) { + String concreteIndex = enrich.concreteIndices().get(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + writeEsIndex(out, new EsIndex(concreteIndex, Map.of(), Set.of(concreteIndex))); + } else { + throw new IllegalStateException("expected a single concrete enrich index; got " + enrich.concreteIndices()); + } + } writeNamedExpressions(out, enrich.enrichFields()); } @@ -725,19 +751,29 @@ static void writeEval(PlanStreamOutput out, Eval eval) throws IOException { } static Enrich readEnrich(PlanStreamInput in) throws IOException { - Enrich.Mode m = Enrich.Mode.ANY; + Enrich.Mode mode = Enrich.Mode.ANY; if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ENRICH_POLICY_CCQ_MODE)) { - m = in.readEnum(Enrich.Mode.class); + mode = in.readEnum(Enrich.Mode.class); } - return new Enrich( - in.readSource(), - in.readLogicalPlanNode(), - m, - in.readExpression(), - in.readNamedExpression(), - new EnrichPolicyResolution(in.readString(), new EnrichPolicy(in), IndexResolution.valid(readEsIndex(in))), - readNamedExpressions(in) - ); + final Source source = in.readSource(); + final LogicalPlan child = in.readLogicalPlanNode(); + final Expression policyName = in.readExpression(); + final NamedExpression matchField = in.readNamedExpression(); + if (in.getTransportVersion().before(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { + in.readString(); // discard the old policy name + } + final EnrichPolicy policy = new 
EnrichPolicy(in); + final Map concreteIndices; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { + concreteIndices = in.readMap(StreamInput::readString, StreamInput::readString); + } else { + EsIndex esIndex = readEsIndex(in); + if (esIndex.concreteIndices().size() > 1) { + throw new IllegalStateException("expected a single enrich index; got " + esIndex); + } + concreteIndices = Map.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, Iterables.get(esIndex.concreteIndices(), 0)); + } + return new Enrich(source, child, mode, policyName, matchField, policy, concreteIndices, readNamedExpressions(in)); } static void writeEnrich(PlanStreamOutput out, Enrich enrich) throws IOException { @@ -749,9 +785,22 @@ static void writeEnrich(PlanStreamOutput out, Enrich enrich) throws IOException out.writeLogicalPlanNode(enrich.child()); out.writeExpression(enrich.policyName()); out.writeNamedExpression(enrich.matchField()); - out.writeString(enrich.policy().policyName()); - enrich.policy().policy().writeTo(out); - writeEsIndex(out, enrich.policy().index().get()); + if (out.getTransportVersion().before(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { + out.writeString(BytesRefs.toString(enrich.policyName().fold())); // old policy name + } + enrich.policy().writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { + out.writeMap(enrich.concreteIndices(), StreamOutput::writeString, StreamOutput::writeString); + } else { + Map concreteIndices = enrich.concreteIndices(); + if (concreteIndices.keySet().equals(Set.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY))) { + String enrichIndex = concreteIndices.get(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + EsIndex esIndex = new EsIndex(enrichIndex, Map.of(), Set.of(enrichIndex)); + writeEsIndex(out, esIndex); + } else { + throw new IllegalStateException("expected a single enrich index; got " + concreteIndices); + } + } writeNamedExpressions(out, enrich.enrichFields()); } @@ -1180,6 +1229,8 @@ static void writeBinaryLogic(PlanStreamOutput out, BinaryLogic binaryLogic) thro entry(name(ToDatetime.class), ToDatetime::new), entry(name(ToDegrees.class), ToDegrees::new), entry(name(ToDouble.class), ToDouble::new), + entry(name(ToGeoShape.class), ToGeoShape::new), + entry(name(ToCartesianShape.class), ToCartesianShape::new), entry(name(ToGeoPoint.class), ToGeoPoint::new), entry(name(ToIP.class), ToIP::new), entry(name(ToInteger.class), ToInteger::new), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index e4f67838731a0..699206240292b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -153,6 +153,7 @@ protected static List> rules() { Limiter.ONCE, new SubstituteSurrogates(), new ReplaceRegexMatch(), + new ReplaceNestedExpressionWithEval(), new ReplaceAliasingEvalWithProject() // new NormalizeAggregate(), - waits on https://github.com/elastic/elasticsearch/issues/100634 ); @@ -245,7 +246,7 @@ protected LogicalPlan rule(Aggregate aggregate) { return plan; } - private static String temporaryName(NamedExpression agg, AggregateFunction af) { + static String temporaryName(NamedExpression agg, AggregateFunction af) { return "__" + agg.name() + "_" + af.functionName() + "@" + 
Integer.toHexString(af.hashCode());
         }
     }
 
@@ -1056,6 +1057,109 @@ protected Expression regexToEquals(RegexMatch regexMatch, Literal literal) {
         }
     }
 
+    /**
+     * Replace nested expressions inside an aggregate with a synthetic eval (which ends up being projected away by the aggregate).
+     * stats sum(a + 1) by x % 2
+     * becomes
+     * eval `a + 1` = a + 1, `x % 2` = x % 2 | stats sum(`a+1`_ref) by `x % 2`_ref
+     */
+    static class ReplaceNestedExpressionWithEval extends OptimizerRules.OptimizerRule {
+
+        @Override
+        protected LogicalPlan rule(Aggregate aggregate) {
+            List evals = new ArrayList<>();
+            Map evalNames = new HashMap<>();
+            List newGroupings = new ArrayList<>(aggregate.groupings());
+            boolean groupingChanged = false;
+
+            // start with the groupings since the aggs might duplicate them
+            for (int i = 0, s = newGroupings.size(); i < s; i++) {
+                Expression g = newGroupings.get(i);
+                // move the alias into an eval and replace it with its attribute
+                if (g instanceof Alias as) {
+                    groupingChanged = true;
+                    var attr = as.toAttribute();
+                    evals.add(as);
+                    evalNames.put(as.name(), attr);
+                    newGroupings.set(i, attr);
+                }
+            }
+
+            Holder aggsChanged = new Holder<>(false);
+            List aggs = aggregate.aggregates();
+            List newAggs = new ArrayList<>(aggs.size());
+
+            // map to track common expressions
+            Map expToAttribute = new HashMap<>();
+            for (Alias a : evals) {
+                expToAttribute.put(a.child().canonical(), a.toAttribute());
+            }
+
+            // for the aggs make sure to unwrap the agg function and check the existing groupings
+            for (int i = 0, s = aggs.size(); i < s; i++) {
+                NamedExpression agg = aggs.get(i);
+
+                NamedExpression a = (NamedExpression) agg.transformDown(Alias.class, as -> {
+                    // if the child is a nested expression
+                    Expression child = as.child();
+
+                    // shortcut for common scenario
+                    if (child instanceof AggregateFunction af && af.field() instanceof Attribute) {
+                        return as;
+                    }
+
+                    // check if the alias matches any from grouping, otherwise unwrap it
+                    Attribute ref = evalNames.get(as.name());
+                    if (ref != null) {
+                        aggsChanged.set(true);
+                        return ref;
+                    }
+
+                    // TODO: break expression into aggregate functions (sum(x + 1) / max(y + 2))
+                    // List afs = a.collectFirstChildren(AggregateFunction.class::isInstance);
+
+                    // 1. look for the aggregate function
+                    var replaced = child.transformUp(AggregateFunction.class, af -> {
+                        Expression result = af;
+
+                        Expression field = af.field();
+                        // 2. if the field is a nested expression (not attribute or literal), replace it
+                        if (field instanceof Attribute == false && field.foldable() == false) {
+                            // 3. create a new alias if one doesn't exist yet
+                            Attribute attr = expToAttribute.computeIfAbsent(field.canonical(), k -> {
+                                Alias newAlias = new Alias(k.source(), temporaryName(agg, af), null, k, null, true);
+                                evals.add(newAlias);
+                                aggsChanged.set(true);
+                                return newAlias.toAttribute();
+                            });
+                            // replace field with attribute
+                            result = af.replaceChildren(Collections.singletonList(attr));
+                        }
+                        return result;
+                    });
+
+                    return as.replaceChild(replaced);
+                });
+
+                newAggs.add(a);
+            }
+
+            if (evals.size() > 0) {
+                var groupings = groupingChanged ? newGroupings : aggregate.groupings();
+                var aggregates = aggsChanged.get() ?
newAggs : aggregate.aggregates(); + + var newEval = new Eval(aggregate.source(), aggregate.child(), evals); + aggregate = new Aggregate(aggregate.source(), newEval, groupings, aggregates); + } + + return aggregate; + } + + static String temporaryName(NamedExpression agg, AggregateFunction af) { + return SubstituteSurrogates.temporaryName(agg, af); + } + } + /** * Replace aliasing evals (eval x=a) with a projection which can be further combined / simplified. * The rule gets applied only if there's another project (Project/Stats) above it. diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.interp b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.interp index 823e56b88b0dd..684ad86d9a7c3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.interp +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.interp @@ -232,7 +232,6 @@ metadata evalCommand statsCommand inlinestatsCommand -grouping fromIdentifier qualifiedName qualifiedNamePattern @@ -266,4 +265,4 @@ setting atn: -[4, 1, 104, 533, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 110, 8, 1, 10, 1, 12, 1, 113, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 119, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 134, 8, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 146, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 153, 8, 5, 10, 5, 12, 5, 156, 9, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 163, 8, 5, 1, 5, 1, 5, 3, 5, 167, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 175, 8, 5, 10, 5, 12, 5, 178, 9, 5, 1, 6, 1, 6, 3, 6, 182, 8, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 3, 6, 189, 8, 6, 1, 6, 1, 6, 1, 6, 3, 6, 194, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 201, 8, 7, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 207, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 5, 8, 215, 8, 8, 10, 8, 12, 8, 218, 9, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 3, 9, 227, 8, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 5, 10, 235, 8, 10, 10, 10, 12, 10, 238, 9, 10, 3, 10, 240, 8, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 5, 12, 250, 8, 12, 10, 12, 12, 12, 253, 9, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 3, 13, 260, 8, 13, 1, 14, 1, 14, 1, 14, 1, 14, 5, 14, 266, 8, 14, 10, 14, 12, 14, 269, 9, 14, 1, 14, 3, 14, 272, 8, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 5, 15, 279, 8, 15, 10, 15, 12, 15, 282, 9, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 3, 17, 291, 8, 17, 1, 17, 1, 17, 3, 17, 295, 8, 17, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 301, 8, 18, 1, 19, 1, 19, 1, 19, 5, 19, 306, 8, 19, 10, 19, 12, 19, 309, 9, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 5, 21, 316, 8, 21, 10, 21, 12, 21, 319, 9, 21, 1, 22, 1, 22, 1, 22, 5, 22, 324, 8, 22, 10, 22, 12, 22, 327, 9, 22, 1, 23, 1, 23, 1, 24, 1, 24, 
1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 5, 25, 346, 8, 25, 10, 25, 12, 25, 349, 9, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 5, 25, 357, 8, 25, 10, 25, 12, 25, 360, 9, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 5, 25, 368, 8, 25, 10, 25, 12, 25, 371, 9, 25, 1, 25, 1, 25, 3, 25, 375, 8, 25, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 5, 27, 384, 8, 27, 10, 27, 12, 27, 387, 9, 27, 1, 28, 1, 28, 3, 28, 391, 8, 28, 1, 28, 1, 28, 3, 28, 395, 8, 28, 1, 29, 1, 29, 1, 29, 1, 29, 5, 29, 401, 8, 29, 10, 29, 12, 29, 404, 9, 29, 1, 29, 1, 29, 1, 29, 1, 29, 5, 29, 410, 8, 29, 10, 29, 12, 29, 413, 9, 29, 3, 29, 415, 8, 29, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 421, 8, 30, 10, 30, 12, 30, 424, 9, 30, 1, 31, 1, 31, 1, 31, 1, 31, 5, 31, 430, 8, 31, 10, 31, 12, 31, 433, 9, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 33, 1, 33, 3, 33, 443, 8, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 5, 36, 455, 8, 36, 10, 36, 12, 36, 458, 9, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 39, 1, 39, 3, 39, 468, 8, 39, 1, 40, 3, 40, 471, 8, 40, 1, 40, 1, 40, 1, 41, 3, 41, 476, 8, 41, 1, 41, 1, 41, 1, 42, 1, 42, 1, 43, 1, 43, 1, 44, 1, 44, 1, 44, 1, 45, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 1, 46, 3, 46, 495, 8, 46, 1, 47, 1, 47, 5, 47, 499, 8, 47, 10, 47, 12, 47, 502, 9, 47, 1, 47, 1, 47, 1, 47, 3, 47, 507, 8, 47, 1, 47, 1, 47, 1, 47, 1, 47, 5, 47, 513, 8, 47, 10, 47, 12, 47, 516, 9, 47, 3, 47, 518, 8, 47, 1, 48, 1, 48, 1, 48, 3, 48, 523, 8, 48, 1, 48, 1, 48, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 0, 3, 2, 10, 16, 50, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 0, 9, 1, 0, 58, 59, 1, 0, 60, 62, 2, 0, 66, 66, 71, 71, 1, 0, 65, 66, 2, 0, 66, 66, 75, 75, 2, 0, 32, 32, 35, 35, 1, 0, 38, 39, 2, 0, 37, 37, 51, 51, 1, 0, 52, 57, 562, 0, 100, 1, 0, 0, 0, 2, 103, 1, 0, 0, 0, 4, 118, 1, 0, 0, 0, 6, 133, 1, 0, 0, 0, 8, 135, 1, 0, 0, 0, 10, 166, 1, 0, 0, 0, 12, 193, 1, 0, 0, 0, 14, 200, 1, 0, 0, 0, 16, 206, 1, 0, 0, 0, 18, 226, 1, 0, 0, 0, 20, 228, 1, 0, 0, 0, 22, 243, 1, 0, 0, 0, 24, 246, 1, 0, 0, 0, 26, 259, 1, 0, 0, 0, 28, 261, 1, 0, 0, 0, 30, 273, 1, 0, 0, 0, 32, 285, 1, 0, 0, 0, 34, 288, 1, 0, 0, 0, 36, 296, 1, 0, 0, 0, 38, 302, 1, 0, 0, 0, 40, 310, 1, 0, 0, 0, 42, 312, 1, 0, 0, 0, 44, 320, 1, 0, 0, 0, 46, 328, 1, 0, 0, 0, 48, 330, 1, 0, 0, 0, 50, 374, 1, 0, 0, 0, 52, 376, 1, 0, 0, 0, 54, 379, 1, 0, 0, 0, 56, 388, 1, 0, 0, 0, 58, 414, 1, 0, 0, 0, 60, 416, 1, 0, 0, 0, 62, 425, 1, 0, 0, 0, 64, 434, 1, 0, 0, 0, 66, 438, 1, 0, 0, 0, 68, 444, 1, 0, 0, 0, 70, 448, 1, 0, 0, 0, 72, 451, 1, 0, 0, 0, 74, 459, 1, 0, 0, 0, 76, 463, 1, 0, 0, 0, 78, 467, 1, 0, 0, 0, 80, 470, 1, 0, 0, 0, 82, 475, 1, 0, 0, 0, 84, 479, 1, 0, 0, 0, 86, 481, 1, 0, 0, 0, 88, 483, 1, 0, 0, 0, 90, 486, 1, 0, 0, 0, 92, 494, 1, 0, 0, 0, 94, 496, 1, 0, 0, 0, 96, 522, 1, 0, 0, 0, 98, 526, 1, 0, 0, 0, 100, 101, 3, 2, 1, 0, 101, 102, 5, 0, 0, 1, 102, 1, 1, 0, 0, 0, 103, 104, 6, 1, -1, 0, 104, 105, 3, 4, 2, 0, 105, 111, 1, 0, 0, 0, 106, 107, 10, 1, 0, 0, 107, 108, 5, 26, 0, 0, 108, 110, 3, 6, 3, 0, 109, 106, 1, 0, 0, 0, 110, 113, 1, 0, 0, 0, 111, 109, 1, 0, 0, 0, 111, 112, 1, 0, 0, 0, 112, 3, 1, 0, 0, 0, 113, 111, 1, 0, 0, 0, 114, 119, 3, 88, 44, 0, 115, 119, 3, 28, 14, 0, 116, 119, 3, 22, 11, 0, 117, 119, 3, 92, 46, 0, 118, 114, 1, 0, 0, 0, 118, 115, 1, 0, 0, 0, 118, 116, 1, 0, 0, 0, 118, 117, 1, 0, 0, 0, 119, 5, 1, 
0, 0, 0, 120, 134, 3, 32, 16, 0, 121, 134, 3, 36, 18, 0, 122, 134, 3, 52, 26, 0, 123, 134, 3, 58, 29, 0, 124, 134, 3, 54, 27, 0, 125, 134, 3, 34, 17, 0, 126, 134, 3, 8, 4, 0, 127, 134, 3, 60, 30, 0, 128, 134, 3, 62, 31, 0, 129, 134, 3, 66, 33, 0, 130, 134, 3, 68, 34, 0, 131, 134, 3, 94, 47, 0, 132, 134, 3, 70, 35, 0, 133, 120, 1, 0, 0, 0, 133, 121, 1, 0, 0, 0, 133, 122, 1, 0, 0, 0, 133, 123, 1, 0, 0, 0, 133, 124, 1, 0, 0, 0, 133, 125, 1, 0, 0, 0, 133, 126, 1, 0, 0, 0, 133, 127, 1, 0, 0, 0, 133, 128, 1, 0, 0, 0, 133, 129, 1, 0, 0, 0, 133, 130, 1, 0, 0, 0, 133, 131, 1, 0, 0, 0, 133, 132, 1, 0, 0, 0, 134, 7, 1, 0, 0, 0, 135, 136, 5, 18, 0, 0, 136, 137, 3, 10, 5, 0, 137, 9, 1, 0, 0, 0, 138, 139, 6, 5, -1, 0, 139, 140, 5, 44, 0, 0, 140, 167, 3, 10, 5, 7, 141, 167, 3, 14, 7, 0, 142, 167, 3, 12, 6, 0, 143, 145, 3, 14, 7, 0, 144, 146, 5, 44, 0, 0, 145, 144, 1, 0, 0, 0, 145, 146, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 148, 5, 41, 0, 0, 148, 149, 5, 40, 0, 0, 149, 154, 3, 14, 7, 0, 150, 151, 5, 34, 0, 0, 151, 153, 3, 14, 7, 0, 152, 150, 1, 0, 0, 0, 153, 156, 1, 0, 0, 0, 154, 152, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 157, 1, 0, 0, 0, 156, 154, 1, 0, 0, 0, 157, 158, 5, 50, 0, 0, 158, 167, 1, 0, 0, 0, 159, 160, 3, 14, 7, 0, 160, 162, 5, 42, 0, 0, 161, 163, 5, 44, 0, 0, 162, 161, 1, 0, 0, 0, 162, 163, 1, 0, 0, 0, 163, 164, 1, 0, 0, 0, 164, 165, 5, 45, 0, 0, 165, 167, 1, 0, 0, 0, 166, 138, 1, 0, 0, 0, 166, 141, 1, 0, 0, 0, 166, 142, 1, 0, 0, 0, 166, 143, 1, 0, 0, 0, 166, 159, 1, 0, 0, 0, 167, 176, 1, 0, 0, 0, 168, 169, 10, 4, 0, 0, 169, 170, 5, 31, 0, 0, 170, 175, 3, 10, 5, 5, 171, 172, 10, 3, 0, 0, 172, 173, 5, 47, 0, 0, 173, 175, 3, 10, 5, 4, 174, 168, 1, 0, 0, 0, 174, 171, 1, 0, 0, 0, 175, 178, 1, 0, 0, 0, 176, 174, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 11, 1, 0, 0, 0, 178, 176, 1, 0, 0, 0, 179, 181, 3, 14, 7, 0, 180, 182, 5, 44, 0, 0, 181, 180, 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 183, 1, 0, 0, 0, 183, 184, 5, 43, 0, 0, 184, 185, 3, 84, 42, 0, 185, 194, 1, 0, 0, 0, 186, 188, 3, 14, 7, 0, 187, 189, 5, 44, 0, 0, 188, 187, 1, 0, 0, 0, 188, 189, 1, 0, 0, 0, 189, 190, 1, 0, 0, 0, 190, 191, 5, 49, 0, 0, 191, 192, 3, 84, 42, 0, 192, 194, 1, 0, 0, 0, 193, 179, 1, 0, 0, 0, 193, 186, 1, 0, 0, 0, 194, 13, 1, 0, 0, 0, 195, 201, 3, 16, 8, 0, 196, 197, 3, 16, 8, 0, 197, 198, 3, 86, 43, 0, 198, 199, 3, 16, 8, 0, 199, 201, 1, 0, 0, 0, 200, 195, 1, 0, 0, 0, 200, 196, 1, 0, 0, 0, 201, 15, 1, 0, 0, 0, 202, 203, 6, 8, -1, 0, 203, 207, 3, 18, 9, 0, 204, 205, 7, 0, 0, 0, 205, 207, 3, 16, 8, 3, 206, 202, 1, 0, 0, 0, 206, 204, 1, 0, 0, 0, 207, 216, 1, 0, 0, 0, 208, 209, 10, 2, 0, 0, 209, 210, 7, 1, 0, 0, 210, 215, 3, 16, 8, 3, 211, 212, 10, 1, 0, 0, 212, 213, 7, 0, 0, 0, 213, 215, 3, 16, 8, 2, 214, 208, 1, 0, 0, 0, 214, 211, 1, 0, 0, 0, 215, 218, 1, 0, 0, 0, 216, 214, 1, 0, 0, 0, 216, 217, 1, 0, 0, 0, 217, 17, 1, 0, 0, 0, 218, 216, 1, 0, 0, 0, 219, 227, 3, 50, 25, 0, 220, 227, 3, 42, 21, 0, 221, 227, 3, 20, 10, 0, 222, 223, 5, 40, 0, 0, 223, 224, 3, 10, 5, 0, 224, 225, 5, 50, 0, 0, 225, 227, 1, 0, 0, 0, 226, 219, 1, 0, 0, 0, 226, 220, 1, 0, 0, 0, 226, 221, 1, 0, 0, 0, 226, 222, 1, 0, 0, 0, 227, 19, 1, 0, 0, 0, 228, 229, 3, 46, 23, 0, 229, 239, 5, 40, 0, 0, 230, 240, 5, 60, 0, 0, 231, 236, 3, 10, 5, 0, 232, 233, 5, 34, 0, 0, 233, 235, 3, 10, 5, 0, 234, 232, 1, 0, 0, 0, 235, 238, 1, 0, 0, 0, 236, 234, 1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 240, 1, 0, 0, 0, 238, 236, 1, 0, 0, 0, 239, 230, 1, 0, 0, 0, 239, 231, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, 242, 5, 50, 0, 0, 242, 21, 1, 0, 0, 0, 243, 244, 5, 
14, 0, 0, 244, 245, 3, 24, 12, 0, 245, 23, 1, 0, 0, 0, 246, 251, 3, 26, 13, 0, 247, 248, 5, 34, 0, 0, 248, 250, 3, 26, 13, 0, 249, 247, 1, 0, 0, 0, 250, 253, 1, 0, 0, 0, 251, 249, 1, 0, 0, 0, 251, 252, 1, 0, 0, 0, 252, 25, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 254, 260, 3, 10, 5, 0, 255, 256, 3, 42, 21, 0, 256, 257, 5, 33, 0, 0, 257, 258, 3, 10, 5, 0, 258, 260, 1, 0, 0, 0, 259, 254, 1, 0, 0, 0, 259, 255, 1, 0, 0, 0, 260, 27, 1, 0, 0, 0, 261, 262, 5, 6, 0, 0, 262, 267, 3, 40, 20, 0, 263, 264, 5, 34, 0, 0, 264, 266, 3, 40, 20, 0, 265, 263, 1, 0, 0, 0, 266, 269, 1, 0, 0, 0, 267, 265, 1, 0, 0, 0, 267, 268, 1, 0, 0, 0, 268, 271, 1, 0, 0, 0, 269, 267, 1, 0, 0, 0, 270, 272, 3, 30, 15, 0, 271, 270, 1, 0, 0, 0, 271, 272, 1, 0, 0, 0, 272, 29, 1, 0, 0, 0, 273, 274, 5, 63, 0, 0, 274, 275, 5, 70, 0, 0, 275, 280, 3, 40, 20, 0, 276, 277, 5, 34, 0, 0, 277, 279, 3, 40, 20, 0, 278, 276, 1, 0, 0, 0, 279, 282, 1, 0, 0, 0, 280, 278, 1, 0, 0, 0, 280, 281, 1, 0, 0, 0, 281, 283, 1, 0, 0, 0, 282, 280, 1, 0, 0, 0, 283, 284, 5, 64, 0, 0, 284, 31, 1, 0, 0, 0, 285, 286, 5, 4, 0, 0, 286, 287, 3, 24, 12, 0, 287, 33, 1, 0, 0, 0, 288, 290, 5, 17, 0, 0, 289, 291, 3, 24, 12, 0, 290, 289, 1, 0, 0, 0, 290, 291, 1, 0, 0, 0, 291, 294, 1, 0, 0, 0, 292, 293, 5, 30, 0, 0, 293, 295, 3, 38, 19, 0, 294, 292, 1, 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 35, 1, 0, 0, 0, 296, 297, 5, 8, 0, 0, 297, 300, 3, 24, 12, 0, 298, 299, 5, 30, 0, 0, 299, 301, 3, 38, 19, 0, 300, 298, 1, 0, 0, 0, 300, 301, 1, 0, 0, 0, 301, 37, 1, 0, 0, 0, 302, 307, 3, 42, 21, 0, 303, 304, 5, 34, 0, 0, 304, 306, 3, 42, 21, 0, 305, 303, 1, 0, 0, 0, 306, 309, 1, 0, 0, 0, 307, 305, 1, 0, 0, 0, 307, 308, 1, 0, 0, 0, 308, 39, 1, 0, 0, 0, 309, 307, 1, 0, 0, 0, 310, 311, 7, 2, 0, 0, 311, 41, 1, 0, 0, 0, 312, 317, 3, 46, 23, 0, 313, 314, 5, 36, 0, 0, 314, 316, 3, 46, 23, 0, 315, 313, 1, 0, 0, 0, 316, 319, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 317, 318, 1, 0, 0, 0, 318, 43, 1, 0, 0, 0, 319, 317, 1, 0, 0, 0, 320, 325, 3, 48, 24, 0, 321, 322, 5, 36, 0, 0, 322, 324, 3, 48, 24, 0, 323, 321, 1, 0, 0, 0, 324, 327, 1, 0, 0, 0, 325, 323, 1, 0, 0, 0, 325, 326, 1, 0, 0, 0, 326, 45, 1, 0, 0, 0, 327, 325, 1, 0, 0, 0, 328, 329, 7, 3, 0, 0, 329, 47, 1, 0, 0, 0, 330, 331, 7, 4, 0, 0, 331, 49, 1, 0, 0, 0, 332, 375, 5, 45, 0, 0, 333, 334, 3, 82, 41, 0, 334, 335, 5, 65, 0, 0, 335, 375, 1, 0, 0, 0, 336, 375, 3, 80, 40, 0, 337, 375, 3, 82, 41, 0, 338, 375, 3, 76, 38, 0, 339, 375, 5, 48, 0, 0, 340, 375, 3, 84, 42, 0, 341, 342, 5, 63, 0, 0, 342, 347, 3, 78, 39, 0, 343, 344, 5, 34, 0, 0, 344, 346, 3, 78, 39, 0, 345, 343, 1, 0, 0, 0, 346, 349, 1, 0, 0, 0, 347, 345, 1, 0, 0, 0, 347, 348, 1, 0, 0, 0, 348, 350, 1, 0, 0, 0, 349, 347, 1, 0, 0, 0, 350, 351, 5, 64, 0, 0, 351, 375, 1, 0, 0, 0, 352, 353, 5, 63, 0, 0, 353, 358, 3, 76, 38, 0, 354, 355, 5, 34, 0, 0, 355, 357, 3, 76, 38, 0, 356, 354, 1, 0, 0, 0, 357, 360, 1, 0, 0, 0, 358, 356, 1, 0, 0, 0, 358, 359, 1, 0, 0, 0, 359, 361, 1, 0, 0, 0, 360, 358, 1, 0, 0, 0, 361, 362, 5, 64, 0, 0, 362, 375, 1, 0, 0, 0, 363, 364, 5, 63, 0, 0, 364, 369, 3, 84, 42, 0, 365, 366, 5, 34, 0, 0, 366, 368, 3, 84, 42, 0, 367, 365, 1, 0, 0, 0, 368, 371, 1, 0, 0, 0, 369, 367, 1, 0, 0, 0, 369, 370, 1, 0, 0, 0, 370, 372, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 372, 373, 5, 64, 0, 0, 373, 375, 1, 0, 0, 0, 374, 332, 1, 0, 0, 0, 374, 333, 1, 0, 0, 0, 374, 336, 1, 0, 0, 0, 374, 337, 1, 0, 0, 0, 374, 338, 1, 0, 0, 0, 374, 339, 1, 0, 0, 0, 374, 340, 1, 0, 0, 0, 374, 341, 1, 0, 0, 0, 374, 352, 1, 0, 0, 0, 374, 363, 1, 0, 0, 0, 375, 51, 1, 0, 0, 0, 376, 377, 5, 10, 0, 0, 377, 378, 5, 28, 0, 0, 378, 53, 1, 0, 
0, 0, 379, 380, 5, 16, 0, 0, 380, 385, 3, 56, 28, 0, 381, 382, 5, 34, 0, 0, 382, 384, 3, 56, 28, 0, 383, 381, 1, 0, 0, 0, 384, 387, 1, 0, 0, 0, 385, 383, 1, 0, 0, 0, 385, 386, 1, 0, 0, 0, 386, 55, 1, 0, 0, 0, 387, 385, 1, 0, 0, 0, 388, 390, 3, 10, 5, 0, 389, 391, 7, 5, 0, 0, 390, 389, 1, 0, 0, 0, 390, 391, 1, 0, 0, 0, 391, 394, 1, 0, 0, 0, 392, 393, 5, 46, 0, 0, 393, 395, 7, 6, 0, 0, 394, 392, 1, 0, 0, 0, 394, 395, 1, 0, 0, 0, 395, 57, 1, 0, 0, 0, 396, 397, 5, 9, 0, 0, 397, 402, 3, 44, 22, 0, 398, 399, 5, 34, 0, 0, 399, 401, 3, 44, 22, 0, 400, 398, 1, 0, 0, 0, 401, 404, 1, 0, 0, 0, 402, 400, 1, 0, 0, 0, 402, 403, 1, 0, 0, 0, 403, 415, 1, 0, 0, 0, 404, 402, 1, 0, 0, 0, 405, 406, 5, 12, 0, 0, 406, 411, 3, 44, 22, 0, 407, 408, 5, 34, 0, 0, 408, 410, 3, 44, 22, 0, 409, 407, 1, 0, 0, 0, 410, 413, 1, 0, 0, 0, 411, 409, 1, 0, 0, 0, 411, 412, 1, 0, 0, 0, 412, 415, 1, 0, 0, 0, 413, 411, 1, 0, 0, 0, 414, 396, 1, 0, 0, 0, 414, 405, 1, 0, 0, 0, 415, 59, 1, 0, 0, 0, 416, 417, 5, 2, 0, 0, 417, 422, 3, 44, 22, 0, 418, 419, 5, 34, 0, 0, 419, 421, 3, 44, 22, 0, 420, 418, 1, 0, 0, 0, 421, 424, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 422, 423, 1, 0, 0, 0, 423, 61, 1, 0, 0, 0, 424, 422, 1, 0, 0, 0, 425, 426, 5, 13, 0, 0, 426, 431, 3, 64, 32, 0, 427, 428, 5, 34, 0, 0, 428, 430, 3, 64, 32, 0, 429, 427, 1, 0, 0, 0, 430, 433, 1, 0, 0, 0, 431, 429, 1, 0, 0, 0, 431, 432, 1, 0, 0, 0, 432, 63, 1, 0, 0, 0, 433, 431, 1, 0, 0, 0, 434, 435, 3, 44, 22, 0, 435, 436, 5, 79, 0, 0, 436, 437, 3, 44, 22, 0, 437, 65, 1, 0, 0, 0, 438, 439, 5, 1, 0, 0, 439, 440, 3, 18, 9, 0, 440, 442, 3, 84, 42, 0, 441, 443, 3, 72, 36, 0, 442, 441, 1, 0, 0, 0, 442, 443, 1, 0, 0, 0, 443, 67, 1, 0, 0, 0, 444, 445, 5, 7, 0, 0, 445, 446, 3, 18, 9, 0, 446, 447, 3, 84, 42, 0, 447, 69, 1, 0, 0, 0, 448, 449, 5, 11, 0, 0, 449, 450, 3, 42, 21, 0, 450, 71, 1, 0, 0, 0, 451, 456, 3, 74, 37, 0, 452, 453, 5, 34, 0, 0, 453, 455, 3, 74, 37, 0, 454, 452, 1, 0, 0, 0, 455, 458, 1, 0, 0, 0, 456, 454, 1, 0, 0, 0, 456, 457, 1, 0, 0, 0, 457, 73, 1, 0, 0, 0, 458, 456, 1, 0, 0, 0, 459, 460, 3, 46, 23, 0, 460, 461, 5, 33, 0, 0, 461, 462, 3, 50, 25, 0, 462, 75, 1, 0, 0, 0, 463, 464, 7, 7, 0, 0, 464, 77, 1, 0, 0, 0, 465, 468, 3, 80, 40, 0, 466, 468, 3, 82, 41, 0, 467, 465, 1, 0, 0, 0, 467, 466, 1, 0, 0, 0, 468, 79, 1, 0, 0, 0, 469, 471, 7, 0, 0, 0, 470, 469, 1, 0, 0, 0, 470, 471, 1, 0, 0, 0, 471, 472, 1, 0, 0, 0, 472, 473, 5, 29, 0, 0, 473, 81, 1, 0, 0, 0, 474, 476, 7, 0, 0, 0, 475, 474, 1, 0, 0, 0, 475, 476, 1, 0, 0, 0, 476, 477, 1, 0, 0, 0, 477, 478, 5, 28, 0, 0, 478, 83, 1, 0, 0, 0, 479, 480, 5, 27, 0, 0, 480, 85, 1, 0, 0, 0, 481, 482, 7, 8, 0, 0, 482, 87, 1, 0, 0, 0, 483, 484, 5, 5, 0, 0, 484, 485, 3, 90, 45, 0, 485, 89, 1, 0, 0, 0, 486, 487, 5, 63, 0, 0, 487, 488, 3, 2, 1, 0, 488, 489, 5, 64, 0, 0, 489, 91, 1, 0, 0, 0, 490, 491, 5, 15, 0, 0, 491, 495, 5, 95, 0, 0, 492, 493, 5, 15, 0, 0, 493, 495, 5, 96, 0, 0, 494, 490, 1, 0, 0, 0, 494, 492, 1, 0, 0, 0, 495, 93, 1, 0, 0, 0, 496, 500, 5, 3, 0, 0, 497, 499, 3, 98, 49, 0, 498, 497, 1, 0, 0, 0, 499, 502, 1, 0, 0, 0, 500, 498, 1, 0, 0, 0, 500, 501, 1, 0, 0, 0, 501, 503, 1, 0, 0, 0, 502, 500, 1, 0, 0, 0, 503, 506, 5, 85, 0, 0, 504, 505, 5, 83, 0, 0, 505, 507, 3, 44, 22, 0, 506, 504, 1, 0, 0, 0, 506, 507, 1, 0, 0, 0, 507, 517, 1, 0, 0, 0, 508, 509, 5, 84, 0, 0, 509, 514, 3, 96, 48, 0, 510, 511, 5, 34, 0, 0, 511, 513, 3, 96, 48, 0, 512, 510, 1, 0, 0, 0, 513, 516, 1, 0, 0, 0, 514, 512, 1, 0, 0, 0, 514, 515, 1, 0, 0, 0, 515, 518, 1, 0, 0, 0, 516, 514, 1, 0, 0, 0, 517, 508, 1, 0, 0, 0, 517, 518, 1, 0, 0, 0, 518, 95, 1, 0, 0, 0, 519, 520, 
3, 44, 22, 0, 520, 521, 5, 33, 0, 0, 521, 523, 1, 0, 0, 0, 522, 519, 1, 0, 0, 0, 522, 523, 1, 0, 0, 0, 523, 524, 1, 0, 0, 0, 524, 525, 3, 44, 22, 0, 525, 97, 1, 0, 0, 0, 526, 527, 5, 63, 0, 0, 527, 528, 5, 101, 0, 0, 528, 529, 5, 100, 0, 0, 529, 530, 5, 101, 0, 0, 530, 531, 5, 64, 0, 0, 531, 99, 1, 0, 0, 0, 53, 111, 118, 133, 145, 154, 162, 166, 174, 176, 181, 188, 193, 200, 206, 214, 216, 226, 236, 239, 251, 259, 267, 271, 280, 290, 294, 300, 307, 317, 325, 347, 358, 369, 374, 385, 390, 394, 402, 411, 414, 422, 431, 442, 456, 467, 470, 475, 494, 500, 506, 514, 517, 522] \ No newline at end of file +[4, 1, 104, 523, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 108, 8, 1, 10, 1, 12, 1, 111, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 117, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 132, 8, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 144, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 151, 8, 5, 10, 5, 12, 5, 154, 9, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 161, 8, 5, 1, 5, 1, 5, 3, 5, 165, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 173, 8, 5, 10, 5, 12, 5, 176, 9, 5, 1, 6, 1, 6, 3, 6, 180, 8, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 3, 6, 187, 8, 6, 1, 6, 1, 6, 1, 6, 3, 6, 192, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 199, 8, 7, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 205, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 5, 8, 213, 8, 8, 10, 8, 12, 8, 216, 9, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 3, 9, 225, 8, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 5, 10, 233, 8, 10, 10, 10, 12, 10, 236, 9, 10, 3, 10, 238, 8, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 5, 12, 248, 8, 12, 10, 12, 12, 12, 251, 9, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 3, 13, 258, 8, 13, 1, 14, 1, 14, 1, 14, 1, 14, 5, 14, 264, 8, 14, 10, 14, 12, 14, 267, 9, 14, 1, 14, 3, 14, 270, 8, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 5, 15, 277, 8, 15, 10, 15, 12, 15, 280, 9, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 3, 17, 289, 8, 17, 1, 17, 1, 17, 3, 17, 293, 8, 17, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 299, 8, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 5, 20, 306, 8, 20, 10, 20, 12, 20, 309, 9, 20, 1, 21, 1, 21, 1, 21, 5, 21, 314, 8, 21, 10, 21, 12, 21, 317, 9, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 5, 24, 336, 8, 24, 10, 24, 12, 24, 339, 9, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 5, 24, 347, 8, 24, 10, 24, 12, 24, 350, 9, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 5, 24, 358, 8, 24, 10, 24, 12, 24, 361, 9, 24, 1, 24, 1, 24, 3, 24, 365, 8, 24, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 5, 26, 374, 8, 26, 10, 26, 12, 26, 377, 9, 26, 1, 27, 1, 27, 3, 27, 381, 8, 27, 1, 27, 1, 27, 3, 27, 385, 8, 27, 1, 28, 1, 28, 1, 28, 1, 28, 5, 28, 391, 8, 28, 10, 28, 12, 28, 394, 9, 28, 1, 28, 1, 28, 1, 28, 1, 28, 5, 
28, 400, 8, 28, 10, 28, 12, 28, 403, 9, 28, 3, 28, 405, 8, 28, 1, 29, 1, 29, 1, 29, 1, 29, 5, 29, 411, 8, 29, 10, 29, 12, 29, 414, 9, 29, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 420, 8, 30, 10, 30, 12, 30, 423, 9, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 433, 8, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 5, 35, 445, 8, 35, 10, 35, 12, 35, 448, 9, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 38, 1, 38, 3, 38, 458, 8, 38, 1, 39, 3, 39, 461, 8, 39, 1, 39, 1, 39, 1, 40, 3, 40, 466, 8, 40, 1, 40, 1, 40, 1, 41, 1, 41, 1, 42, 1, 42, 1, 43, 1, 43, 1, 43, 1, 44, 1, 44, 1, 44, 1, 44, 1, 45, 1, 45, 1, 45, 1, 45, 3, 45, 485, 8, 45, 1, 46, 1, 46, 5, 46, 489, 8, 46, 10, 46, 12, 46, 492, 9, 46, 1, 46, 1, 46, 1, 46, 3, 46, 497, 8, 46, 1, 46, 1, 46, 1, 46, 1, 46, 5, 46, 503, 8, 46, 10, 46, 12, 46, 506, 9, 46, 3, 46, 508, 8, 46, 1, 47, 1, 47, 1, 47, 3, 47, 513, 8, 47, 1, 47, 1, 47, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 0, 3, 2, 10, 16, 49, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 0, 9, 1, 0, 58, 59, 1, 0, 60, 62, 2, 0, 66, 66, 71, 71, 1, 0, 65, 66, 2, 0, 66, 66, 75, 75, 2, 0, 32, 32, 35, 35, 1, 0, 38, 39, 2, 0, 37, 37, 51, 51, 1, 0, 52, 57, 552, 0, 98, 1, 0, 0, 0, 2, 101, 1, 0, 0, 0, 4, 116, 1, 0, 0, 0, 6, 131, 1, 0, 0, 0, 8, 133, 1, 0, 0, 0, 10, 164, 1, 0, 0, 0, 12, 191, 1, 0, 0, 0, 14, 198, 1, 0, 0, 0, 16, 204, 1, 0, 0, 0, 18, 224, 1, 0, 0, 0, 20, 226, 1, 0, 0, 0, 22, 241, 1, 0, 0, 0, 24, 244, 1, 0, 0, 0, 26, 257, 1, 0, 0, 0, 28, 259, 1, 0, 0, 0, 30, 271, 1, 0, 0, 0, 32, 283, 1, 0, 0, 0, 34, 286, 1, 0, 0, 0, 36, 294, 1, 0, 0, 0, 38, 300, 1, 0, 0, 0, 40, 302, 1, 0, 0, 0, 42, 310, 1, 0, 0, 0, 44, 318, 1, 0, 0, 0, 46, 320, 1, 0, 0, 0, 48, 364, 1, 0, 0, 0, 50, 366, 1, 0, 0, 0, 52, 369, 1, 0, 0, 0, 54, 378, 1, 0, 0, 0, 56, 404, 1, 0, 0, 0, 58, 406, 1, 0, 0, 0, 60, 415, 1, 0, 0, 0, 62, 424, 1, 0, 0, 0, 64, 428, 1, 0, 0, 0, 66, 434, 1, 0, 0, 0, 68, 438, 1, 0, 0, 0, 70, 441, 1, 0, 0, 0, 72, 449, 1, 0, 0, 0, 74, 453, 1, 0, 0, 0, 76, 457, 1, 0, 0, 0, 78, 460, 1, 0, 0, 0, 80, 465, 1, 0, 0, 0, 82, 469, 1, 0, 0, 0, 84, 471, 1, 0, 0, 0, 86, 473, 1, 0, 0, 0, 88, 476, 1, 0, 0, 0, 90, 484, 1, 0, 0, 0, 92, 486, 1, 0, 0, 0, 94, 512, 1, 0, 0, 0, 96, 516, 1, 0, 0, 0, 98, 99, 3, 2, 1, 0, 99, 100, 5, 0, 0, 1, 100, 1, 1, 0, 0, 0, 101, 102, 6, 1, -1, 0, 102, 103, 3, 4, 2, 0, 103, 109, 1, 0, 0, 0, 104, 105, 10, 1, 0, 0, 105, 106, 5, 26, 0, 0, 106, 108, 3, 6, 3, 0, 107, 104, 1, 0, 0, 0, 108, 111, 1, 0, 0, 0, 109, 107, 1, 0, 0, 0, 109, 110, 1, 0, 0, 0, 110, 3, 1, 0, 0, 0, 111, 109, 1, 0, 0, 0, 112, 117, 3, 86, 43, 0, 113, 117, 3, 28, 14, 0, 114, 117, 3, 22, 11, 0, 115, 117, 3, 90, 45, 0, 116, 112, 1, 0, 0, 0, 116, 113, 1, 0, 0, 0, 116, 114, 1, 0, 0, 0, 116, 115, 1, 0, 0, 0, 117, 5, 1, 0, 0, 0, 118, 132, 3, 32, 16, 0, 119, 132, 3, 36, 18, 0, 120, 132, 3, 50, 25, 0, 121, 132, 3, 56, 28, 0, 122, 132, 3, 52, 26, 0, 123, 132, 3, 34, 17, 0, 124, 132, 3, 8, 4, 0, 125, 132, 3, 58, 29, 0, 126, 132, 3, 60, 30, 0, 127, 132, 3, 64, 32, 0, 128, 132, 3, 66, 33, 0, 129, 132, 3, 92, 46, 0, 130, 132, 3, 68, 34, 0, 131, 118, 1, 0, 0, 0, 131, 119, 1, 0, 0, 0, 131, 120, 1, 0, 0, 0, 131, 121, 1, 0, 0, 0, 131, 122, 1, 0, 0, 0, 131, 123, 1, 0, 0, 0, 131, 124, 1, 0, 0, 0, 131, 125, 1, 0, 0, 0, 131, 126, 1, 0, 0, 0, 131, 127, 1, 0, 0, 0, 131, 128, 1, 0, 0, 0, 131, 129, 1, 0, 0, 0, 131, 130, 1, 0, 0, 0, 132, 7, 1, 0, 0, 0, 133, 134, 5, 18, 0, 0, 
134, 135, 3, 10, 5, 0, 135, 9, 1, 0, 0, 0, 136, 137, 6, 5, -1, 0, 137, 138, 5, 44, 0, 0, 138, 165, 3, 10, 5, 7, 139, 165, 3, 14, 7, 0, 140, 165, 3, 12, 6, 0, 141, 143, 3, 14, 7, 0, 142, 144, 5, 44, 0, 0, 143, 142, 1, 0, 0, 0, 143, 144, 1, 0, 0, 0, 144, 145, 1, 0, 0, 0, 145, 146, 5, 41, 0, 0, 146, 147, 5, 40, 0, 0, 147, 152, 3, 14, 7, 0, 148, 149, 5, 34, 0, 0, 149, 151, 3, 14, 7, 0, 150, 148, 1, 0, 0, 0, 151, 154, 1, 0, 0, 0, 152, 150, 1, 0, 0, 0, 152, 153, 1, 0, 0, 0, 153, 155, 1, 0, 0, 0, 154, 152, 1, 0, 0, 0, 155, 156, 5, 50, 0, 0, 156, 165, 1, 0, 0, 0, 157, 158, 3, 14, 7, 0, 158, 160, 5, 42, 0, 0, 159, 161, 5, 44, 0, 0, 160, 159, 1, 0, 0, 0, 160, 161, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 5, 45, 0, 0, 163, 165, 1, 0, 0, 0, 164, 136, 1, 0, 0, 0, 164, 139, 1, 0, 0, 0, 164, 140, 1, 0, 0, 0, 164, 141, 1, 0, 0, 0, 164, 157, 1, 0, 0, 0, 165, 174, 1, 0, 0, 0, 166, 167, 10, 4, 0, 0, 167, 168, 5, 31, 0, 0, 168, 173, 3, 10, 5, 5, 169, 170, 10, 3, 0, 0, 170, 171, 5, 47, 0, 0, 171, 173, 3, 10, 5, 4, 172, 166, 1, 0, 0, 0, 172, 169, 1, 0, 0, 0, 173, 176, 1, 0, 0, 0, 174, 172, 1, 0, 0, 0, 174, 175, 1, 0, 0, 0, 175, 11, 1, 0, 0, 0, 176, 174, 1, 0, 0, 0, 177, 179, 3, 14, 7, 0, 178, 180, 5, 44, 0, 0, 179, 178, 1, 0, 0, 0, 179, 180, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 182, 5, 43, 0, 0, 182, 183, 3, 82, 41, 0, 183, 192, 1, 0, 0, 0, 184, 186, 3, 14, 7, 0, 185, 187, 5, 44, 0, 0, 186, 185, 1, 0, 0, 0, 186, 187, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 189, 5, 49, 0, 0, 189, 190, 3, 82, 41, 0, 190, 192, 1, 0, 0, 0, 191, 177, 1, 0, 0, 0, 191, 184, 1, 0, 0, 0, 192, 13, 1, 0, 0, 0, 193, 199, 3, 16, 8, 0, 194, 195, 3, 16, 8, 0, 195, 196, 3, 84, 42, 0, 196, 197, 3, 16, 8, 0, 197, 199, 1, 0, 0, 0, 198, 193, 1, 0, 0, 0, 198, 194, 1, 0, 0, 0, 199, 15, 1, 0, 0, 0, 200, 201, 6, 8, -1, 0, 201, 205, 3, 18, 9, 0, 202, 203, 7, 0, 0, 0, 203, 205, 3, 16, 8, 3, 204, 200, 1, 0, 0, 0, 204, 202, 1, 0, 0, 0, 205, 214, 1, 0, 0, 0, 206, 207, 10, 2, 0, 0, 207, 208, 7, 1, 0, 0, 208, 213, 3, 16, 8, 3, 209, 210, 10, 1, 0, 0, 210, 211, 7, 0, 0, 0, 211, 213, 3, 16, 8, 2, 212, 206, 1, 0, 0, 0, 212, 209, 1, 0, 0, 0, 213, 216, 1, 0, 0, 0, 214, 212, 1, 0, 0, 0, 214, 215, 1, 0, 0, 0, 215, 17, 1, 0, 0, 0, 216, 214, 1, 0, 0, 0, 217, 225, 3, 48, 24, 0, 218, 225, 3, 40, 20, 0, 219, 225, 3, 20, 10, 0, 220, 221, 5, 40, 0, 0, 221, 222, 3, 10, 5, 0, 222, 223, 5, 50, 0, 0, 223, 225, 1, 0, 0, 0, 224, 217, 1, 0, 0, 0, 224, 218, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 224, 220, 1, 0, 0, 0, 225, 19, 1, 0, 0, 0, 226, 227, 3, 44, 22, 0, 227, 237, 5, 40, 0, 0, 228, 238, 5, 60, 0, 0, 229, 234, 3, 10, 5, 0, 230, 231, 5, 34, 0, 0, 231, 233, 3, 10, 5, 0, 232, 230, 1, 0, 0, 0, 233, 236, 1, 0, 0, 0, 234, 232, 1, 0, 0, 0, 234, 235, 1, 0, 0, 0, 235, 238, 1, 0, 0, 0, 236, 234, 1, 0, 0, 0, 237, 228, 1, 0, 0, 0, 237, 229, 1, 0, 0, 0, 237, 238, 1, 0, 0, 0, 238, 239, 1, 0, 0, 0, 239, 240, 5, 50, 0, 0, 240, 21, 1, 0, 0, 0, 241, 242, 5, 14, 0, 0, 242, 243, 3, 24, 12, 0, 243, 23, 1, 0, 0, 0, 244, 249, 3, 26, 13, 0, 245, 246, 5, 34, 0, 0, 246, 248, 3, 26, 13, 0, 247, 245, 1, 0, 0, 0, 248, 251, 1, 0, 0, 0, 249, 247, 1, 0, 0, 0, 249, 250, 1, 0, 0, 0, 250, 25, 1, 0, 0, 0, 251, 249, 1, 0, 0, 0, 252, 258, 3, 10, 5, 0, 253, 254, 3, 40, 20, 0, 254, 255, 5, 33, 0, 0, 255, 256, 3, 10, 5, 0, 256, 258, 1, 0, 0, 0, 257, 252, 1, 0, 0, 0, 257, 253, 1, 0, 0, 0, 258, 27, 1, 0, 0, 0, 259, 260, 5, 6, 0, 0, 260, 265, 3, 38, 19, 0, 261, 262, 5, 34, 0, 0, 262, 264, 3, 38, 19, 0, 263, 261, 1, 0, 0, 0, 264, 267, 1, 0, 0, 0, 265, 263, 1, 0, 0, 0, 265, 266, 1, 0, 0, 0, 266, 269, 1, 0, 0, 0, 267, 
265, 1, 0, 0, 0, 268, 270, 3, 30, 15, 0, 269, 268, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 29, 1, 0, 0, 0, 271, 272, 5, 63, 0, 0, 272, 273, 5, 70, 0, 0, 273, 278, 3, 38, 19, 0, 274, 275, 5, 34, 0, 0, 275, 277, 3, 38, 19, 0, 276, 274, 1, 0, 0, 0, 277, 280, 1, 0, 0, 0, 278, 276, 1, 0, 0, 0, 278, 279, 1, 0, 0, 0, 279, 281, 1, 0, 0, 0, 280, 278, 1, 0, 0, 0, 281, 282, 5, 64, 0, 0, 282, 31, 1, 0, 0, 0, 283, 284, 5, 4, 0, 0, 284, 285, 3, 24, 12, 0, 285, 33, 1, 0, 0, 0, 286, 288, 5, 17, 0, 0, 287, 289, 3, 24, 12, 0, 288, 287, 1, 0, 0, 0, 288, 289, 1, 0, 0, 0, 289, 292, 1, 0, 0, 0, 290, 291, 5, 30, 0, 0, 291, 293, 3, 24, 12, 0, 292, 290, 1, 0, 0, 0, 292, 293, 1, 0, 0, 0, 293, 35, 1, 0, 0, 0, 294, 295, 5, 8, 0, 0, 295, 298, 3, 24, 12, 0, 296, 297, 5, 30, 0, 0, 297, 299, 3, 24, 12, 0, 298, 296, 1, 0, 0, 0, 298, 299, 1, 0, 0, 0, 299, 37, 1, 0, 0, 0, 300, 301, 7, 2, 0, 0, 301, 39, 1, 0, 0, 0, 302, 307, 3, 44, 22, 0, 303, 304, 5, 36, 0, 0, 304, 306, 3, 44, 22, 0, 305, 303, 1, 0, 0, 0, 306, 309, 1, 0, 0, 0, 307, 305, 1, 0, 0, 0, 307, 308, 1, 0, 0, 0, 308, 41, 1, 0, 0, 0, 309, 307, 1, 0, 0, 0, 310, 315, 3, 46, 23, 0, 311, 312, 5, 36, 0, 0, 312, 314, 3, 46, 23, 0, 313, 311, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, 1, 0, 0, 0, 316, 43, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 319, 7, 3, 0, 0, 319, 45, 1, 0, 0, 0, 320, 321, 7, 4, 0, 0, 321, 47, 1, 0, 0, 0, 322, 365, 5, 45, 0, 0, 323, 324, 3, 80, 40, 0, 324, 325, 5, 65, 0, 0, 325, 365, 1, 0, 0, 0, 326, 365, 3, 78, 39, 0, 327, 365, 3, 80, 40, 0, 328, 365, 3, 74, 37, 0, 329, 365, 5, 48, 0, 0, 330, 365, 3, 82, 41, 0, 331, 332, 5, 63, 0, 0, 332, 337, 3, 76, 38, 0, 333, 334, 5, 34, 0, 0, 334, 336, 3, 76, 38, 0, 335, 333, 1, 0, 0, 0, 336, 339, 1, 0, 0, 0, 337, 335, 1, 0, 0, 0, 337, 338, 1, 0, 0, 0, 338, 340, 1, 0, 0, 0, 339, 337, 1, 0, 0, 0, 340, 341, 5, 64, 0, 0, 341, 365, 1, 0, 0, 0, 342, 343, 5, 63, 0, 0, 343, 348, 3, 74, 37, 0, 344, 345, 5, 34, 0, 0, 345, 347, 3, 74, 37, 0, 346, 344, 1, 0, 0, 0, 347, 350, 1, 0, 0, 0, 348, 346, 1, 0, 0, 0, 348, 349, 1, 0, 0, 0, 349, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, 352, 5, 64, 0, 0, 352, 365, 1, 0, 0, 0, 353, 354, 5, 63, 0, 0, 354, 359, 3, 82, 41, 0, 355, 356, 5, 34, 0, 0, 356, 358, 3, 82, 41, 0, 357, 355, 1, 0, 0, 0, 358, 361, 1, 0, 0, 0, 359, 357, 1, 0, 0, 0, 359, 360, 1, 0, 0, 0, 360, 362, 1, 0, 0, 0, 361, 359, 1, 0, 0, 0, 362, 363, 5, 64, 0, 0, 363, 365, 1, 0, 0, 0, 364, 322, 1, 0, 0, 0, 364, 323, 1, 0, 0, 0, 364, 326, 1, 0, 0, 0, 364, 327, 1, 0, 0, 0, 364, 328, 1, 0, 0, 0, 364, 329, 1, 0, 0, 0, 364, 330, 1, 0, 0, 0, 364, 331, 1, 0, 0, 0, 364, 342, 1, 0, 0, 0, 364, 353, 1, 0, 0, 0, 365, 49, 1, 0, 0, 0, 366, 367, 5, 10, 0, 0, 367, 368, 5, 28, 0, 0, 368, 51, 1, 0, 0, 0, 369, 370, 5, 16, 0, 0, 370, 375, 3, 54, 27, 0, 371, 372, 5, 34, 0, 0, 372, 374, 3, 54, 27, 0, 373, 371, 1, 0, 0, 0, 374, 377, 1, 0, 0, 0, 375, 373, 1, 0, 0, 0, 375, 376, 1, 0, 0, 0, 376, 53, 1, 0, 0, 0, 377, 375, 1, 0, 0, 0, 378, 380, 3, 10, 5, 0, 379, 381, 7, 5, 0, 0, 380, 379, 1, 0, 0, 0, 380, 381, 1, 0, 0, 0, 381, 384, 1, 0, 0, 0, 382, 383, 5, 46, 0, 0, 383, 385, 7, 6, 0, 0, 384, 382, 1, 0, 0, 0, 384, 385, 1, 0, 0, 0, 385, 55, 1, 0, 0, 0, 386, 387, 5, 9, 0, 0, 387, 392, 3, 42, 21, 0, 388, 389, 5, 34, 0, 0, 389, 391, 3, 42, 21, 0, 390, 388, 1, 0, 0, 0, 391, 394, 1, 0, 0, 0, 392, 390, 1, 0, 0, 0, 392, 393, 1, 0, 0, 0, 393, 405, 1, 0, 0, 0, 394, 392, 1, 0, 0, 0, 395, 396, 5, 12, 0, 0, 396, 401, 3, 42, 21, 0, 397, 398, 5, 34, 0, 0, 398, 400, 3, 42, 21, 0, 399, 397, 1, 0, 0, 0, 400, 403, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 401, 402, 
1, 0, 0, 0, 402, 405, 1, 0, 0, 0, 403, 401, 1, 0, 0, 0, 404, 386, 1, 0, 0, 0, 404, 395, 1, 0, 0, 0, 405, 57, 1, 0, 0, 0, 406, 407, 5, 2, 0, 0, 407, 412, 3, 42, 21, 0, 408, 409, 5, 34, 0, 0, 409, 411, 3, 42, 21, 0, 410, 408, 1, 0, 0, 0, 411, 414, 1, 0, 0, 0, 412, 410, 1, 0, 0, 0, 412, 413, 1, 0, 0, 0, 413, 59, 1, 0, 0, 0, 414, 412, 1, 0, 0, 0, 415, 416, 5, 13, 0, 0, 416, 421, 3, 62, 31, 0, 417, 418, 5, 34, 0, 0, 418, 420, 3, 62, 31, 0, 419, 417, 1, 0, 0, 0, 420, 423, 1, 0, 0, 0, 421, 419, 1, 0, 0, 0, 421, 422, 1, 0, 0, 0, 422, 61, 1, 0, 0, 0, 423, 421, 1, 0, 0, 0, 424, 425, 3, 42, 21, 0, 425, 426, 5, 79, 0, 0, 426, 427, 3, 42, 21, 0, 427, 63, 1, 0, 0, 0, 428, 429, 5, 1, 0, 0, 429, 430, 3, 18, 9, 0, 430, 432, 3, 82, 41, 0, 431, 433, 3, 70, 35, 0, 432, 431, 1, 0, 0, 0, 432, 433, 1, 0, 0, 0, 433, 65, 1, 0, 0, 0, 434, 435, 5, 7, 0, 0, 435, 436, 3, 18, 9, 0, 436, 437, 3, 82, 41, 0, 437, 67, 1, 0, 0, 0, 438, 439, 5, 11, 0, 0, 439, 440, 3, 40, 20, 0, 440, 69, 1, 0, 0, 0, 441, 446, 3, 72, 36, 0, 442, 443, 5, 34, 0, 0, 443, 445, 3, 72, 36, 0, 444, 442, 1, 0, 0, 0, 445, 448, 1, 0, 0, 0, 446, 444, 1, 0, 0, 0, 446, 447, 1, 0, 0, 0, 447, 71, 1, 0, 0, 0, 448, 446, 1, 0, 0, 0, 449, 450, 3, 44, 22, 0, 450, 451, 5, 33, 0, 0, 451, 452, 3, 48, 24, 0, 452, 73, 1, 0, 0, 0, 453, 454, 7, 7, 0, 0, 454, 75, 1, 0, 0, 0, 455, 458, 3, 78, 39, 0, 456, 458, 3, 80, 40, 0, 457, 455, 1, 0, 0, 0, 457, 456, 1, 0, 0, 0, 458, 77, 1, 0, 0, 0, 459, 461, 7, 0, 0, 0, 460, 459, 1, 0, 0, 0, 460, 461, 1, 0, 0, 0, 461, 462, 1, 0, 0, 0, 462, 463, 5, 29, 0, 0, 463, 79, 1, 0, 0, 0, 464, 466, 7, 0, 0, 0, 465, 464, 1, 0, 0, 0, 465, 466, 1, 0, 0, 0, 466, 467, 1, 0, 0, 0, 467, 468, 5, 28, 0, 0, 468, 81, 1, 0, 0, 0, 469, 470, 5, 27, 0, 0, 470, 83, 1, 0, 0, 0, 471, 472, 7, 8, 0, 0, 472, 85, 1, 0, 0, 0, 473, 474, 5, 5, 0, 0, 474, 475, 3, 88, 44, 0, 475, 87, 1, 0, 0, 0, 476, 477, 5, 63, 0, 0, 477, 478, 3, 2, 1, 0, 478, 479, 5, 64, 0, 0, 479, 89, 1, 0, 0, 0, 480, 481, 5, 15, 0, 0, 481, 485, 5, 95, 0, 0, 482, 483, 5, 15, 0, 0, 483, 485, 5, 96, 0, 0, 484, 480, 1, 0, 0, 0, 484, 482, 1, 0, 0, 0, 485, 91, 1, 0, 0, 0, 486, 490, 5, 3, 0, 0, 487, 489, 3, 96, 48, 0, 488, 487, 1, 0, 0, 0, 489, 492, 1, 0, 0, 0, 490, 488, 1, 0, 0, 0, 490, 491, 1, 0, 0, 0, 491, 493, 1, 0, 0, 0, 492, 490, 1, 0, 0, 0, 493, 496, 5, 85, 0, 0, 494, 495, 5, 83, 0, 0, 495, 497, 3, 42, 21, 0, 496, 494, 1, 0, 0, 0, 496, 497, 1, 0, 0, 0, 497, 507, 1, 0, 0, 0, 498, 499, 5, 84, 0, 0, 499, 504, 3, 94, 47, 0, 500, 501, 5, 34, 0, 0, 501, 503, 3, 94, 47, 0, 502, 500, 1, 0, 0, 0, 503, 506, 1, 0, 0, 0, 504, 502, 1, 0, 0, 0, 504, 505, 1, 0, 0, 0, 505, 508, 1, 0, 0, 0, 506, 504, 1, 0, 0, 0, 507, 498, 1, 0, 0, 0, 507, 508, 1, 0, 0, 0, 508, 93, 1, 0, 0, 0, 509, 510, 3, 42, 21, 0, 510, 511, 5, 33, 0, 0, 511, 513, 1, 0, 0, 0, 512, 509, 1, 0, 0, 0, 512, 513, 1, 0, 0, 0, 513, 514, 1, 0, 0, 0, 514, 515, 3, 42, 21, 0, 515, 95, 1, 0, 0, 0, 516, 517, 5, 63, 0, 0, 517, 518, 5, 101, 0, 0, 518, 519, 5, 100, 0, 0, 519, 520, 5, 101, 0, 0, 520, 521, 5, 64, 0, 0, 521, 97, 1, 0, 0, 0, 52, 109, 116, 131, 143, 152, 160, 164, 172, 174, 179, 186, 191, 198, 204, 212, 214, 224, 234, 237, 249, 257, 265, 269, 278, 288, 292, 298, 307, 315, 337, 348, 359, 364, 375, 380, 384, 392, 401, 404, 412, 421, 432, 446, 457, 460, 465, 484, 490, 496, 504, 507, 512] \ No newline at end of file diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java index 49e72c2ef2b14..0207be3d2305f 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java @@ -43,30 +43,30 @@ public class EsqlBaseParser extends Parser { RULE_valueExpression = 7, RULE_operatorExpression = 8, RULE_primaryExpression = 9, RULE_functionExpression = 10, RULE_rowCommand = 11, RULE_fields = 12, RULE_field = 13, RULE_fromCommand = 14, RULE_metadata = 15, RULE_evalCommand = 16, - RULE_statsCommand = 17, RULE_inlinestatsCommand = 18, RULE_grouping = 19, - RULE_fromIdentifier = 20, RULE_qualifiedName = 21, RULE_qualifiedNamePattern = 22, - RULE_identifier = 23, RULE_identifierPattern = 24, RULE_constant = 25, - RULE_limitCommand = 26, RULE_sortCommand = 27, RULE_orderExpression = 28, - RULE_keepCommand = 29, RULE_dropCommand = 30, RULE_renameCommand = 31, - RULE_renameClause = 32, RULE_dissectCommand = 33, RULE_grokCommand = 34, - RULE_mvExpandCommand = 35, RULE_commandOptions = 36, RULE_commandOption = 37, - RULE_booleanValue = 38, RULE_numericValue = 39, RULE_decimalValue = 40, - RULE_integerValue = 41, RULE_string = 42, RULE_comparisonOperator = 43, - RULE_explainCommand = 44, RULE_subqueryExpression = 45, RULE_showCommand = 46, - RULE_enrichCommand = 47, RULE_enrichWithClause = 48, RULE_setting = 49; + RULE_statsCommand = 17, RULE_inlinestatsCommand = 18, RULE_fromIdentifier = 19, + RULE_qualifiedName = 20, RULE_qualifiedNamePattern = 21, RULE_identifier = 22, + RULE_identifierPattern = 23, RULE_constant = 24, RULE_limitCommand = 25, + RULE_sortCommand = 26, RULE_orderExpression = 27, RULE_keepCommand = 28, + RULE_dropCommand = 29, RULE_renameCommand = 30, RULE_renameClause = 31, + RULE_dissectCommand = 32, RULE_grokCommand = 33, RULE_mvExpandCommand = 34, + RULE_commandOptions = 35, RULE_commandOption = 36, RULE_booleanValue = 37, + RULE_numericValue = 38, RULE_decimalValue = 39, RULE_integerValue = 40, + RULE_string = 41, RULE_comparisonOperator = 42, RULE_explainCommand = 43, + RULE_subqueryExpression = 44, RULE_showCommand = 45, RULE_enrichCommand = 46, + RULE_enrichWithClause = 47, RULE_setting = 48; private static String[] makeRuleNames() { return new String[] { "singleStatement", "query", "sourceCommand", "processingCommand", "whereCommand", "booleanExpression", "regexBooleanExpression", "valueExpression", "operatorExpression", "primaryExpression", "functionExpression", "rowCommand", "fields", "field", "fromCommand", "metadata", "evalCommand", "statsCommand", "inlinestatsCommand", - "grouping", "fromIdentifier", "qualifiedName", "qualifiedNamePattern", - "identifier", "identifierPattern", "constant", "limitCommand", "sortCommand", - "orderExpression", "keepCommand", "dropCommand", "renameCommand", "renameClause", - "dissectCommand", "grokCommand", "mvExpandCommand", "commandOptions", - "commandOption", "booleanValue", "numericValue", "decimalValue", "integerValue", - "string", "comparisonOperator", "explainCommand", "subqueryExpression", - "showCommand", "enrichCommand", "enrichWithClause", "setting" + "fromIdentifier", "qualifiedName", "qualifiedNamePattern", "identifier", + "identifierPattern", "constant", "limitCommand", "sortCommand", "orderExpression", + "keepCommand", "dropCommand", "renameCommand", "renameClause", "dissectCommand", + "grokCommand", "mvExpandCommand", "commandOptions", "commandOption", + "booleanValue", "numericValue", "decimalValue", "integerValue", "string", + "comparisonOperator", "explainCommand", "subqueryExpression", "showCommand", + 
"enrichCommand", "enrichWithClause", "setting" }; } public static final String[] ruleNames = makeRuleNames(); @@ -195,9 +195,9 @@ public final SingleStatementContext singleStatement() throws RecognitionExceptio try { enterOuterAlt(_localctx, 1); { - setState(100); + setState(98); query(0); - setState(101); + setState(99); match(EOF); } } @@ -293,11 +293,11 @@ private QueryContext query(int _p) throws RecognitionException { _ctx = _localctx; _prevctx = _localctx; - setState(104); + setState(102); sourceCommand(); } _ctx.stop = _input.LT(-1); - setState(111); + setState(109); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,0,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -308,16 +308,16 @@ private QueryContext query(int _p) throws RecognitionException { { _localctx = new CompositeQueryContext(new QueryContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_query); - setState(106); + setState(104); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(107); + setState(105); match(PIPE); - setState(108); + setState(106); processingCommand(); } } } - setState(113); + setState(111); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,0,_ctx); } @@ -372,34 +372,34 @@ public final SourceCommandContext sourceCommand() throws RecognitionException { SourceCommandContext _localctx = new SourceCommandContext(_ctx, getState()); enterRule(_localctx, 4, RULE_sourceCommand); try { - setState(118); + setState(116); _errHandler.sync(this); switch (_input.LA(1)) { case EXPLAIN: enterOuterAlt(_localctx, 1); { - setState(114); + setState(112); explainCommand(); } break; case FROM: enterOuterAlt(_localctx, 2); { - setState(115); + setState(113); fromCommand(); } break; case ROW: enterOuterAlt(_localctx, 3); { - setState(116); + setState(114); rowCommand(); } break; case SHOW: enterOuterAlt(_localctx, 4); { - setState(117); + setState(115); showCommand(); } break; @@ -483,27 +483,27 @@ public final ProcessingCommandContext processingCommand() throws RecognitionExce ProcessingCommandContext _localctx = new ProcessingCommandContext(_ctx, getState()); enterRule(_localctx, 6, RULE_processingCommand); try { - setState(133); + setState(131); _errHandler.sync(this); switch (_input.LA(1)) { case EVAL: enterOuterAlt(_localctx, 1); { - setState(120); + setState(118); evalCommand(); } break; case INLINESTATS: enterOuterAlt(_localctx, 2); { - setState(121); + setState(119); inlinestatsCommand(); } break; case LIMIT: enterOuterAlt(_localctx, 3); { - setState(122); + setState(120); limitCommand(); } break; @@ -511,70 +511,70 @@ public final ProcessingCommandContext processingCommand() throws RecognitionExce case PROJECT: enterOuterAlt(_localctx, 4); { - setState(123); + setState(121); keepCommand(); } break; case SORT: enterOuterAlt(_localctx, 5); { - setState(124); + setState(122); sortCommand(); } break; case STATS: enterOuterAlt(_localctx, 6); { - setState(125); + setState(123); statsCommand(); } break; case WHERE: enterOuterAlt(_localctx, 7); { - setState(126); + setState(124); whereCommand(); } break; case DROP: enterOuterAlt(_localctx, 8); { - setState(127); + setState(125); dropCommand(); } break; case RENAME: enterOuterAlt(_localctx, 9); { - setState(128); + setState(126); renameCommand(); } break; case DISSECT: enterOuterAlt(_localctx, 10); { - setState(129); + setState(127); dissectCommand(); } break; case GROK: enterOuterAlt(_localctx, 11); { - 
setState(130); + setState(128); grokCommand(); } break; case ENRICH: enterOuterAlt(_localctx, 12); { - setState(131); + setState(129); enrichCommand(); } break; case MV_EXPAND: enterOuterAlt(_localctx, 13); { - setState(132); + setState(130); mvExpandCommand(); } break; @@ -625,9 +625,9 @@ public final WhereCommandContext whereCommand() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(135); + setState(133); match(WHERE); - setState(136); + setState(134); booleanExpression(0); } } @@ -822,7 +822,7 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc int _alt; enterOuterAlt(_localctx, 1); { - setState(166); + setState(164); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) { case 1: @@ -831,9 +831,9 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _ctx = _localctx; _prevctx = _localctx; - setState(139); + setState(137); match(NOT); - setState(140); + setState(138); booleanExpression(7); } break; @@ -842,7 +842,7 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new BooleanDefaultContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(141); + setState(139); valueExpression(); } break; @@ -851,7 +851,7 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new RegexExpressionContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(142); + setState(140); regexBooleanExpression(); } break; @@ -860,41 +860,41 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new LogicalInContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(143); + setState(141); valueExpression(); - setState(145); + setState(143); _errHandler.sync(this); _la = _input.LA(1); if (_la==NOT) { { - setState(144); + setState(142); match(NOT); } } - setState(147); + setState(145); match(IN); - setState(148); + setState(146); match(LP); - setState(149); + setState(147); valueExpression(); - setState(154); + setState(152); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(150); + setState(148); match(COMMA); - setState(151); + setState(149); valueExpression(); } } - setState(156); + setState(154); _errHandler.sync(this); _la = _input.LA(1); } - setState(157); + setState(155); match(RP); } break; @@ -903,27 +903,27 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new IsNullContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(159); + setState(157); valueExpression(); - setState(160); + setState(158); match(IS); - setState(162); + setState(160); _errHandler.sync(this); _la = _input.LA(1); if (_la==NOT) { { - setState(161); + setState(159); match(NOT); } } - setState(164); + setState(162); match(NULL); } break; } _ctx.stop = _input.LT(-1); - setState(176); + setState(174); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,8,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -931,7 +931,7 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(174); + setState(172); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,7,_ctx) ) { case 1: @@ -939,11 +939,11 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new 
LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState)); ((LogicalBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression); - setState(168); + setState(166); if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, "precpred(_ctx, 4)"); - setState(169); + setState(167); ((LogicalBinaryContext)_localctx).operator = match(AND); - setState(170); + setState(168); ((LogicalBinaryContext)_localctx).right = booleanExpression(5); } break; @@ -952,18 +952,18 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState)); ((LogicalBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression); - setState(171); + setState(169); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(172); + setState(170); ((LogicalBinaryContext)_localctx).operator = match(OR); - setState(173); + setState(171); ((LogicalBinaryContext)_localctx).right = booleanExpression(4); } break; } } } - setState(178); + setState(176); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,8,_ctx); } @@ -1018,48 +1018,48 @@ public final RegexBooleanExpressionContext regexBooleanExpression() throws Recog enterRule(_localctx, 12, RULE_regexBooleanExpression); int _la; try { - setState(193); + setState(191); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(179); + setState(177); valueExpression(); - setState(181); + setState(179); _errHandler.sync(this); _la = _input.LA(1); if (_la==NOT) { { - setState(180); + setState(178); match(NOT); } } - setState(183); + setState(181); ((RegexBooleanExpressionContext)_localctx).kind = match(LIKE); - setState(184); + setState(182); ((RegexBooleanExpressionContext)_localctx).pattern = string(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(186); + setState(184); valueExpression(); - setState(188); + setState(186); _errHandler.sync(this); _la = _input.LA(1); if (_la==NOT) { { - setState(187); + setState(185); match(NOT); } } - setState(190); + setState(188); ((RegexBooleanExpressionContext)_localctx).kind = match(RLIKE); - setState(191); + setState(189); ((RegexBooleanExpressionContext)_localctx).pattern = string(); } break; @@ -1145,14 +1145,14 @@ public final ValueExpressionContext valueExpression() throws RecognitionExceptio ValueExpressionContext _localctx = new ValueExpressionContext(_ctx, getState()); enterRule(_localctx, 14, RULE_valueExpression); try { - setState(200); + setState(198); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { case 1: _localctx = new ValueExpressionDefaultContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(195); + setState(193); operatorExpression(0); } break; @@ -1160,11 +1160,11 @@ public final ValueExpressionContext valueExpression() throws RecognitionExceptio _localctx = new ComparisonContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(196); + setState(194); ((ComparisonContext)_localctx).left = operatorExpression(0); - setState(197); + setState(195); comparisonOperator(); - setState(198); + setState(196); ((ComparisonContext)_localctx).right = operatorExpression(0); } break; @@ -1289,7 +1289,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE int _alt; 
enterOuterAlt(_localctx, 1); { - setState(206); + setState(204); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { case 1: @@ -1298,7 +1298,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _ctx = _localctx; _prevctx = _localctx; - setState(203); + setState(201); primaryExpression(); } break; @@ -1307,7 +1307,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _localctx = new ArithmeticUnaryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(204); + setState(202); ((ArithmeticUnaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { @@ -1318,13 +1318,13 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _errHandler.reportMatch(this); consume(); } - setState(205); + setState(203); operatorExpression(3); } break; } _ctx.stop = _input.LT(-1); - setState(216); + setState(214); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,15,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -1332,7 +1332,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(214); + setState(212); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) { case 1: @@ -1340,9 +1340,9 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _localctx = new ArithmeticBinaryContext(new OperatorExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_operatorExpression); - setState(208); + setState(206); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(209); + setState(207); ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(((_la) & ~0x3f) == 0 && ((1L << _la) & 8070450532247928832L) != 0) ) { @@ -1353,7 +1353,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _errHandler.reportMatch(this); consume(); } - setState(210); + setState(208); ((ArithmeticBinaryContext)_localctx).right = operatorExpression(3); } break; @@ -1362,9 +1362,9 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _localctx = new ArithmeticBinaryContext(new OperatorExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_operatorExpression); - setState(211); + setState(209); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(212); + setState(210); ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { @@ -1375,14 +1375,14 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _errHandler.reportMatch(this); consume(); } - setState(213); + setState(211); ((ArithmeticBinaryContext)_localctx).right = operatorExpression(2); } break; } } } - setState(218); + setState(216); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,15,_ctx); } @@ -1504,14 +1504,14 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce PrimaryExpressionContext _localctx = new PrimaryExpressionContext(_ctx, getState()); 
enterRule(_localctx, 18, RULE_primaryExpression); try { - setState(226); + setState(224); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,16,_ctx) ) { case 1: _localctx = new ConstantDefaultContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(219); + setState(217); constant(); } break; @@ -1519,7 +1519,7 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce _localctx = new DereferenceContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(220); + setState(218); qualifiedName(); } break; @@ -1527,7 +1527,7 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce _localctx = new FunctionContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(221); + setState(219); functionExpression(); } break; @@ -1535,11 +1535,11 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce _localctx = new ParenthesizedExpressionContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(222); + setState(220); match(LP); - setState(223); + setState(221); booleanExpression(0); - setState(224); + setState(222); match(RP); } break; @@ -1601,16 +1601,16 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx try { enterOuterAlt(_localctx, 1); { - setState(228); + setState(226); identifier(); - setState(229); + setState(227); match(LP); - setState(239); + setState(237); _errHandler.sync(this); switch (_input.LA(1)) { case ASTERISK: { - setState(230); + setState(228); match(ASTERISK); } break; @@ -1630,21 +1630,21 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx case QUOTED_IDENTIFIER: { { - setState(231); + setState(229); booleanExpression(0); - setState(236); + setState(234); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(232); + setState(230); match(COMMA); - setState(233); + setState(231); booleanExpression(0); } } - setState(238); + setState(236); _errHandler.sync(this); _la = _input.LA(1); } @@ -1656,7 +1656,7 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx default: break; } - setState(241); + setState(239); match(RP); } } @@ -1703,9 +1703,9 @@ public final RowCommandContext rowCommand() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(243); + setState(241); match(ROW); - setState(244); + setState(242); fields(); } } @@ -1759,23 +1759,23 @@ public final FieldsContext fields() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(246); + setState(244); field(); - setState(251); + setState(249); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,19,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(247); + setState(245); match(COMMA); - setState(248); + setState(246); field(); } } } - setState(253); + setState(251); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,19,_ctx); } @@ -1825,24 +1825,24 @@ public final FieldContext field() throws RecognitionException { FieldContext _localctx = new FieldContext(_ctx, getState()); enterRule(_localctx, 26, RULE_field); try { - setState(259); + setState(257); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,20,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(254); + setState(252); booleanExpression(0); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(255); + setState(253); qualifiedName(); - setState(256); + 
setState(254); match(ASSIGN); - setState(257); + setState(255); booleanExpression(0); } break; @@ -1902,34 +1902,34 @@ public final FromCommandContext fromCommand() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(261); + setState(259); match(FROM); - setState(262); + setState(260); fromIdentifier(); - setState(267); + setState(265); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,21,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(263); + setState(261); match(COMMA); - setState(264); + setState(262); fromIdentifier(); } } } - setState(269); + setState(267); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,21,_ctx); } - setState(271); + setState(269); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,22,_ctx) ) { case 1: { - setState(270); + setState(268); metadata(); } break; @@ -1989,29 +1989,29 @@ public final MetadataContext metadata() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(273); + setState(271); match(OPENING_BRACKET); - setState(274); + setState(272); match(METADATA); - setState(275); + setState(273); fromIdentifier(); - setState(280); + setState(278); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(276); + setState(274); match(COMMA); - setState(277); + setState(275); fromIdentifier(); } } - setState(282); + setState(280); _errHandler.sync(this); _la = _input.LA(1); } - setState(283); + setState(281); match(CLOSING_BRACKET); } } @@ -2058,9 +2058,9 @@ public final EvalCommandContext evalCommand() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(285); + setState(283); match(EVAL); - setState(286); + setState(284); fields(); } } @@ -2077,13 +2077,15 @@ public final EvalCommandContext evalCommand() throws RecognitionException { @SuppressWarnings("CheckReturnValue") public static class StatsCommandContext extends ParserRuleContext { + public FieldsContext stats; + public FieldsContext grouping; public TerminalNode STATS() { return getToken(EsqlBaseParser.STATS, 0); } - public FieldsContext fields() { - return getRuleContext(FieldsContext.class,0); - } public TerminalNode BY() { return getToken(EsqlBaseParser.BY, 0); } - public GroupingContext grouping() { - return getRuleContext(GroupingContext.class,0); + public List fields() { + return getRuleContexts(FieldsContext.class); + } + public FieldsContext fields(int i) { + return getRuleContext(FieldsContext.class,i); } @SuppressWarnings("this-escape") public StatsCommandContext(ParserRuleContext parent, int invokingState) { @@ -2111,27 +2113,27 @@ public final StatsCommandContext statsCommand() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(288); + setState(286); match(STATS); - setState(290); + setState(288); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) { case 1: { - setState(289); - fields(); + setState(287); + ((StatsCommandContext)_localctx).stats = fields(); } break; } - setState(294); + setState(292); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,25,_ctx) ) { case 1: { - setState(292); + setState(290); match(BY); - setState(293); - grouping(); + setState(291); + ((StatsCommandContext)_localctx).grouping = fields(); } break; } @@ -2150,14 +2152,16 @@ public final StatsCommandContext statsCommand() throws RecognitionException { @SuppressWarnings("CheckReturnValue") public static 
class InlinestatsCommandContext extends ParserRuleContext { + public FieldsContext stats; + public FieldsContext grouping; public TerminalNode INLINESTATS() { return getToken(EsqlBaseParser.INLINESTATS, 0); } - public FieldsContext fields() { - return getRuleContext(FieldsContext.class,0); + public List fields() { + return getRuleContexts(FieldsContext.class); } - public TerminalNode BY() { return getToken(EsqlBaseParser.BY, 0); } - public GroupingContext grouping() { - return getRuleContext(GroupingContext.class,0); + public FieldsContext fields(int i) { + return getRuleContext(FieldsContext.class,i); } + public TerminalNode BY() { return getToken(EsqlBaseParser.BY, 0); } @SuppressWarnings("this-escape") public InlinestatsCommandContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -2184,19 +2188,19 @@ public final InlinestatsCommandContext inlinestatsCommand() throws RecognitionEx try { enterOuterAlt(_localctx, 1); { - setState(296); + setState(294); match(INLINESTATS); - setState(297); - fields(); - setState(300); + setState(295); + ((InlinestatsCommandContext)_localctx).stats = fields(); + setState(298); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,26,_ctx) ) { case 1: { - setState(298); + setState(296); match(BY); - setState(299); - grouping(); + setState(297); + ((InlinestatsCommandContext)_localctx).grouping = fields(); } break; } @@ -2213,78 +2217,6 @@ public final InlinestatsCommandContext inlinestatsCommand() throws RecognitionEx return _localctx; } - @SuppressWarnings("CheckReturnValue") - public static class GroupingContext extends ParserRuleContext { - public List qualifiedName() { - return getRuleContexts(QualifiedNameContext.class); - } - public QualifiedNameContext qualifiedName(int i) { - return getRuleContext(QualifiedNameContext.class,i); - } - public List COMMA() { return getTokens(EsqlBaseParser.COMMA); } - public TerminalNode COMMA(int i) { - return getToken(EsqlBaseParser.COMMA, i); - } - @SuppressWarnings("this-escape") - public GroupingContext(ParserRuleContext parent, int invokingState) { - super(parent, invokingState); - } - @Override public int getRuleIndex() { return RULE_grouping; } - @Override - public void enterRule(ParseTreeListener listener) { - if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterGrouping(this); - } - @Override - public void exitRule(ParseTreeListener listener) { - if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitGrouping(this); - } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor)visitor).visitGrouping(this); - else return visitor.visitChildren(this); - } - } - - public final GroupingContext grouping() throws RecognitionException { - GroupingContext _localctx = new GroupingContext(_ctx, getState()); - enterRule(_localctx, 38, RULE_grouping); - try { - int _alt; - enterOuterAlt(_localctx, 1); - { - setState(302); - qualifiedName(); - setState(307); - _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,27,_ctx); - while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { - if ( _alt==1 ) { - { - { - setState(303); - match(COMMA); - setState(304); - qualifiedName(); - } - } - } - setState(309); - _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,27,_ctx); - } - } - } - catch (RecognitionException re) { - _localctx.exception = re; - 
_errHandler.reportError(this, re); - _errHandler.recover(this, re); - } - finally { - exitRule(); - } - return _localctx; - } - @SuppressWarnings("CheckReturnValue") public static class FromIdentifierContext extends ParserRuleContext { public TerminalNode FROM_UNQUOTED_IDENTIFIER() { return getToken(EsqlBaseParser.FROM_UNQUOTED_IDENTIFIER, 0); } @@ -2311,12 +2243,12 @@ public T accept(ParseTreeVisitor visitor) { public final FromIdentifierContext fromIdentifier() throws RecognitionException { FromIdentifierContext _localctx = new FromIdentifierContext(_ctx, getState()); - enterRule(_localctx, 40, RULE_fromIdentifier); + enterRule(_localctx, 38, RULE_fromIdentifier); int _la; try { enterOuterAlt(_localctx, 1); { - setState(310); + setState(300); _la = _input.LA(1); if ( !(_la==QUOTED_IDENTIFIER || _la==FROM_UNQUOTED_IDENTIFIER) ) { _errHandler.recoverInline(this); @@ -2373,30 +2305,30 @@ public T accept(ParseTreeVisitor visitor) { public final QualifiedNameContext qualifiedName() throws RecognitionException { QualifiedNameContext _localctx = new QualifiedNameContext(_ctx, getState()); - enterRule(_localctx, 42, RULE_qualifiedName); + enterRule(_localctx, 40, RULE_qualifiedName); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(312); + setState(302); identifier(); - setState(317); + setState(307); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,28,_ctx); + _alt = getInterpreter().adaptivePredict(_input,27,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(313); + setState(303); match(DOT); - setState(314); + setState(304); identifier(); } } } - setState(319); + setState(309); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,28,_ctx); + _alt = getInterpreter().adaptivePredict(_input,27,_ctx); } } } @@ -2445,30 +2377,30 @@ public T accept(ParseTreeVisitor visitor) { public final QualifiedNamePatternContext qualifiedNamePattern() throws RecognitionException { QualifiedNamePatternContext _localctx = new QualifiedNamePatternContext(_ctx, getState()); - enterRule(_localctx, 44, RULE_qualifiedNamePattern); + enterRule(_localctx, 42, RULE_qualifiedNamePattern); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(320); + setState(310); identifierPattern(); - setState(325); + setState(315); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,29,_ctx); + _alt = getInterpreter().adaptivePredict(_input,28,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(321); + setState(311); match(DOT); - setState(322); + setState(312); identifierPattern(); } } } - setState(327); + setState(317); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,29,_ctx); + _alt = getInterpreter().adaptivePredict(_input,28,_ctx); } } } @@ -2509,12 +2441,12 @@ public T accept(ParseTreeVisitor visitor) { public final IdentifierContext identifier() throws RecognitionException { IdentifierContext _localctx = new IdentifierContext(_ctx, getState()); - enterRule(_localctx, 46, RULE_identifier); + enterRule(_localctx, 44, RULE_identifier); int _la; try { enterOuterAlt(_localctx, 1); { - setState(328); + setState(318); _la = _input.LA(1); if ( !(_la==UNQUOTED_IDENTIFIER || _la==QUOTED_IDENTIFIER) ) { _errHandler.recoverInline(this); @@ -2563,12 +2495,12 @@ public T accept(ParseTreeVisitor visitor) { public final IdentifierPatternContext identifierPattern() throws RecognitionException { 
IdentifierPatternContext _localctx = new IdentifierPatternContext(_ctx, getState()); - enterRule(_localctx, 48, RULE_identifierPattern); + enterRule(_localctx, 46, RULE_identifierPattern); int _la; try { enterOuterAlt(_localctx, 1); { - setState(330); + setState(320); _la = _input.LA(1); if ( !(_la==QUOTED_IDENTIFIER || _la==UNQUOTED_ID_PATTERN) ) { _errHandler.recoverInline(this); @@ -2842,17 +2774,17 @@ public T accept(ParseTreeVisitor visitor) { public final ConstantContext constant() throws RecognitionException { ConstantContext _localctx = new ConstantContext(_ctx, getState()); - enterRule(_localctx, 50, RULE_constant); + enterRule(_localctx, 48, RULE_constant); int _la; try { - setState(374); + setState(364); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,33,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,32,_ctx) ) { case 1: _localctx = new NullLiteralContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(332); + setState(322); match(NULL); } break; @@ -2860,9 +2792,9 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new QualifiedIntegerLiteralContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(333); + setState(323); integerValue(); - setState(334); + setState(324); match(UNQUOTED_IDENTIFIER); } break; @@ -2870,7 +2802,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new DecimalLiteralContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(336); + setState(326); decimalValue(); } break; @@ -2878,7 +2810,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new IntegerLiteralContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(337); + setState(327); integerValue(); } break; @@ -2886,7 +2818,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new BooleanLiteralContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(338); + setState(328); booleanValue(); } break; @@ -2894,7 +2826,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new InputParamContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(339); + setState(329); match(PARAM); } break; @@ -2902,7 +2834,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new StringLiteralContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(340); + setState(330); string(); } break; @@ -2910,27 +2842,27 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new NumericArrayLiteralContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(341); + setState(331); match(OPENING_BRACKET); - setState(342); + setState(332); numericValue(); - setState(347); + setState(337); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(343); + setState(333); match(COMMA); - setState(344); + setState(334); numericValue(); } } - setState(349); + setState(339); _errHandler.sync(this); _la = _input.LA(1); } - setState(350); + setState(340); match(CLOSING_BRACKET); } break; @@ -2938,27 +2870,27 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new BooleanArrayLiteralContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(352); + setState(342); match(OPENING_BRACKET); - setState(353); + setState(343); booleanValue(); - setState(358); + setState(348); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(354); + setState(344); match(COMMA); - 
setState(355); + setState(345); booleanValue(); } } - setState(360); + setState(350); _errHandler.sync(this); _la = _input.LA(1); } - setState(361); + setState(351); match(CLOSING_BRACKET); } break; @@ -2966,27 +2898,27 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new StringArrayLiteralContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(363); + setState(353); match(OPENING_BRACKET); - setState(364); + setState(354); string(); - setState(369); + setState(359); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(365); + setState(355); match(COMMA); - setState(366); + setState(356); string(); } } - setState(371); + setState(361); _errHandler.sync(this); _la = _input.LA(1); } - setState(372); + setState(362); match(CLOSING_BRACKET); } break; @@ -3029,13 +2961,13 @@ public T accept(ParseTreeVisitor visitor) { public final LimitCommandContext limitCommand() throws RecognitionException { LimitCommandContext _localctx = new LimitCommandContext(_ctx, getState()); - enterRule(_localctx, 52, RULE_limitCommand); + enterRule(_localctx, 50, RULE_limitCommand); try { enterOuterAlt(_localctx, 1); { - setState(376); + setState(366); match(LIMIT); - setState(377); + setState(367); match(INTEGER_LITERAL); } } @@ -3085,32 +3017,32 @@ public T accept(ParseTreeVisitor visitor) { public final SortCommandContext sortCommand() throws RecognitionException { SortCommandContext _localctx = new SortCommandContext(_ctx, getState()); - enterRule(_localctx, 54, RULE_sortCommand); + enterRule(_localctx, 52, RULE_sortCommand); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(379); + setState(369); match(SORT); - setState(380); + setState(370); orderExpression(); - setState(385); + setState(375); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,34,_ctx); + _alt = getInterpreter().adaptivePredict(_input,33,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(381); + setState(371); match(COMMA); - setState(382); + setState(372); orderExpression(); } } } - setState(387); + setState(377); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,34,_ctx); + _alt = getInterpreter().adaptivePredict(_input,33,_ctx); } } } @@ -3159,19 +3091,19 @@ public T accept(ParseTreeVisitor visitor) { public final OrderExpressionContext orderExpression() throws RecognitionException { OrderExpressionContext _localctx = new OrderExpressionContext(_ctx, getState()); - enterRule(_localctx, 56, RULE_orderExpression); + enterRule(_localctx, 54, RULE_orderExpression); int _la; try { enterOuterAlt(_localctx, 1); { - setState(388); + setState(378); booleanExpression(0); - setState(390); + setState(380); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,35,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,34,_ctx) ) { case 1: { - setState(389); + setState(379); ((OrderExpressionContext)_localctx).ordering = _input.LT(1); _la = _input.LA(1); if ( !(_la==ASC || _la==DESC) ) { @@ -3185,14 +3117,14 @@ public final OrderExpressionContext orderExpression() throws RecognitionExceptio } break; } - setState(394); + setState(384); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,36,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,35,_ctx) ) { case 1: { - setState(392); + setState(382); match(NULLS); - setState(393); + setState(383); ((OrderExpressionContext)_localctx).nullOrdering = _input.LT(1); _la = 
_input.LA(1); if ( !(_la==FIRST || _la==LAST) ) { @@ -3255,63 +3187,63 @@ public T accept(ParseTreeVisitor visitor) { public final KeepCommandContext keepCommand() throws RecognitionException { KeepCommandContext _localctx = new KeepCommandContext(_ctx, getState()); - enterRule(_localctx, 58, RULE_keepCommand); + enterRule(_localctx, 56, RULE_keepCommand); try { int _alt; - setState(414); + setState(404); _errHandler.sync(this); switch (_input.LA(1)) { case KEEP: enterOuterAlt(_localctx, 1); { - setState(396); + setState(386); match(KEEP); - setState(397); + setState(387); qualifiedNamePattern(); - setState(402); + setState(392); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,37,_ctx); + _alt = getInterpreter().adaptivePredict(_input,36,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(398); + setState(388); match(COMMA); - setState(399); + setState(389); qualifiedNamePattern(); } } } - setState(404); + setState(394); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,37,_ctx); + _alt = getInterpreter().adaptivePredict(_input,36,_ctx); } } break; case PROJECT: enterOuterAlt(_localctx, 2); { - setState(405); + setState(395); match(PROJECT); - setState(406); + setState(396); qualifiedNamePattern(); - setState(411); + setState(401); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,38,_ctx); + _alt = getInterpreter().adaptivePredict(_input,37,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(407); + setState(397); match(COMMA); - setState(408); + setState(398); qualifiedNamePattern(); } } } - setState(413); + setState(403); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,38,_ctx); + _alt = getInterpreter().adaptivePredict(_input,37,_ctx); } } break; @@ -3365,32 +3297,32 @@ public T accept(ParseTreeVisitor visitor) { public final DropCommandContext dropCommand() throws RecognitionException { DropCommandContext _localctx = new DropCommandContext(_ctx, getState()); - enterRule(_localctx, 60, RULE_dropCommand); + enterRule(_localctx, 58, RULE_dropCommand); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(416); + setState(406); match(DROP); - setState(417); + setState(407); qualifiedNamePattern(); - setState(422); + setState(412); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,40,_ctx); + _alt = getInterpreter().adaptivePredict(_input,39,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(418); + setState(408); match(COMMA); - setState(419); + setState(409); qualifiedNamePattern(); } } } - setState(424); + setState(414); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,40,_ctx); + _alt = getInterpreter().adaptivePredict(_input,39,_ctx); } } } @@ -3440,32 +3372,32 @@ public T accept(ParseTreeVisitor visitor) { public final RenameCommandContext renameCommand() throws RecognitionException { RenameCommandContext _localctx = new RenameCommandContext(_ctx, getState()); - enterRule(_localctx, 62, RULE_renameCommand); + enterRule(_localctx, 60, RULE_renameCommand); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(425); + setState(415); match(RENAME); - setState(426); + setState(416); renameClause(); - setState(431); + setState(421); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,41,_ctx); + _alt = 
getInterpreter().adaptivePredict(_input,40,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(427); + setState(417); match(COMMA); - setState(428); + setState(418); renameClause(); } } } - setState(433); + setState(423); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,41,_ctx); + _alt = getInterpreter().adaptivePredict(_input,40,_ctx); } } } @@ -3513,15 +3445,15 @@ public T accept(ParseTreeVisitor visitor) { public final RenameClauseContext renameClause() throws RecognitionException { RenameClauseContext _localctx = new RenameClauseContext(_ctx, getState()); - enterRule(_localctx, 64, RULE_renameClause); + enterRule(_localctx, 62, RULE_renameClause); try { enterOuterAlt(_localctx, 1); { - setState(434); + setState(424); ((RenameClauseContext)_localctx).oldName = qualifiedNamePattern(); - setState(435); + setState(425); match(AS); - setState(436); + setState(426); ((RenameClauseContext)_localctx).newName = qualifiedNamePattern(); } } @@ -3570,22 +3502,22 @@ public T accept(ParseTreeVisitor visitor) { public final DissectCommandContext dissectCommand() throws RecognitionException { DissectCommandContext _localctx = new DissectCommandContext(_ctx, getState()); - enterRule(_localctx, 66, RULE_dissectCommand); + enterRule(_localctx, 64, RULE_dissectCommand); try { enterOuterAlt(_localctx, 1); { - setState(438); + setState(428); match(DISSECT); - setState(439); + setState(429); primaryExpression(); - setState(440); + setState(430); string(); - setState(442); + setState(432); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,42,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,41,_ctx) ) { case 1: { - setState(441); + setState(431); commandOptions(); } break; @@ -3634,15 +3566,15 @@ public T accept(ParseTreeVisitor visitor) { public final GrokCommandContext grokCommand() throws RecognitionException { GrokCommandContext _localctx = new GrokCommandContext(_ctx, getState()); - enterRule(_localctx, 68, RULE_grokCommand); + enterRule(_localctx, 66, RULE_grokCommand); try { enterOuterAlt(_localctx, 1); { - setState(444); + setState(434); match(GROK); - setState(445); + setState(435); primaryExpression(); - setState(446); + setState(436); string(); } } @@ -3685,13 +3617,13 @@ public T accept(ParseTreeVisitor visitor) { public final MvExpandCommandContext mvExpandCommand() throws RecognitionException { MvExpandCommandContext _localctx = new MvExpandCommandContext(_ctx, getState()); - enterRule(_localctx, 70, RULE_mvExpandCommand); + enterRule(_localctx, 68, RULE_mvExpandCommand); try { enterOuterAlt(_localctx, 1); { - setState(448); + setState(438); match(MV_EXPAND); - setState(449); + setState(439); qualifiedName(); } } @@ -3740,30 +3672,30 @@ public T accept(ParseTreeVisitor visitor) { public final CommandOptionsContext commandOptions() throws RecognitionException { CommandOptionsContext _localctx = new CommandOptionsContext(_ctx, getState()); - enterRule(_localctx, 72, RULE_commandOptions); + enterRule(_localctx, 70, RULE_commandOptions); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(451); + setState(441); commandOption(); - setState(456); + setState(446); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,43,_ctx); + _alt = getInterpreter().adaptivePredict(_input,42,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(452); + setState(442); match(COMMA); - setState(453); + 
setState(443); commandOption(); } } } - setState(458); + setState(448); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,43,_ctx); + _alt = getInterpreter().adaptivePredict(_input,42,_ctx); } } } @@ -3809,15 +3741,15 @@ public T accept(ParseTreeVisitor visitor) { public final CommandOptionContext commandOption() throws RecognitionException { CommandOptionContext _localctx = new CommandOptionContext(_ctx, getState()); - enterRule(_localctx, 74, RULE_commandOption); + enterRule(_localctx, 72, RULE_commandOption); try { enterOuterAlt(_localctx, 1); { - setState(459); + setState(449); identifier(); - setState(460); + setState(450); match(ASSIGN); - setState(461); + setState(451); constant(); } } @@ -3858,12 +3790,12 @@ public T accept(ParseTreeVisitor visitor) { public final BooleanValueContext booleanValue() throws RecognitionException { BooleanValueContext _localctx = new BooleanValueContext(_ctx, getState()); - enterRule(_localctx, 76, RULE_booleanValue); + enterRule(_localctx, 74, RULE_booleanValue); int _la; try { enterOuterAlt(_localctx, 1); { - setState(463); + setState(453); _la = _input.LA(1); if ( !(_la==FALSE || _la==TRUE) ) { _errHandler.recoverInline(this); @@ -3916,22 +3848,22 @@ public T accept(ParseTreeVisitor visitor) { public final NumericValueContext numericValue() throws RecognitionException { NumericValueContext _localctx = new NumericValueContext(_ctx, getState()); - enterRule(_localctx, 78, RULE_numericValue); + enterRule(_localctx, 76, RULE_numericValue); try { - setState(467); + setState(457); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,44,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,43,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(465); + setState(455); decimalValue(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(466); + setState(456); integerValue(); } break; @@ -3975,17 +3907,17 @@ public T accept(ParseTreeVisitor visitor) { public final DecimalValueContext decimalValue() throws RecognitionException { DecimalValueContext _localctx = new DecimalValueContext(_ctx, getState()); - enterRule(_localctx, 80, RULE_decimalValue); + enterRule(_localctx, 78, RULE_decimalValue); int _la; try { enterOuterAlt(_localctx, 1); { - setState(470); + setState(460); _errHandler.sync(this); _la = _input.LA(1); if (_la==PLUS || _la==MINUS) { { - setState(469); + setState(459); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { _errHandler.recoverInline(this); @@ -3998,7 +3930,7 @@ public final DecimalValueContext decimalValue() throws RecognitionException { } } - setState(472); + setState(462); match(DECIMAL_LITERAL); } } @@ -4040,17 +3972,17 @@ public T accept(ParseTreeVisitor visitor) { public final IntegerValueContext integerValue() throws RecognitionException { IntegerValueContext _localctx = new IntegerValueContext(_ctx, getState()); - enterRule(_localctx, 82, RULE_integerValue); + enterRule(_localctx, 80, RULE_integerValue); int _la; try { enterOuterAlt(_localctx, 1); { - setState(475); + setState(465); _errHandler.sync(this); _la = _input.LA(1); if (_la==PLUS || _la==MINUS) { { - setState(474); + setState(464); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { _errHandler.recoverInline(this); @@ -4063,7 +3995,7 @@ public final IntegerValueContext integerValue() throws RecognitionException { } } - setState(477); + setState(467); match(INTEGER_LITERAL); } } @@ -4103,11 +4035,11 @@ public T accept(ParseTreeVisitor visitor) { public final StringContext string() throws 
RecognitionException { StringContext _localctx = new StringContext(_ctx, getState()); - enterRule(_localctx, 84, RULE_string); + enterRule(_localctx, 82, RULE_string); try { enterOuterAlt(_localctx, 1); { - setState(479); + setState(469); match(STRING); } } @@ -4152,12 +4084,12 @@ public T accept(ParseTreeVisitor visitor) { public final ComparisonOperatorContext comparisonOperator() throws RecognitionException { ComparisonOperatorContext _localctx = new ComparisonOperatorContext(_ctx, getState()); - enterRule(_localctx, 86, RULE_comparisonOperator); + enterRule(_localctx, 84, RULE_comparisonOperator); int _la; try { enterOuterAlt(_localctx, 1); { - setState(481); + setState(471); _la = _input.LA(1); if ( !(((_la) & ~0x3f) == 0 && ((1L << _la) & 283726776524341248L) != 0) ) { _errHandler.recoverInline(this); @@ -4208,13 +4140,13 @@ public T accept(ParseTreeVisitor visitor) { public final ExplainCommandContext explainCommand() throws RecognitionException { ExplainCommandContext _localctx = new ExplainCommandContext(_ctx, getState()); - enterRule(_localctx, 88, RULE_explainCommand); + enterRule(_localctx, 86, RULE_explainCommand); try { enterOuterAlt(_localctx, 1); { - setState(483); + setState(473); match(EXPLAIN); - setState(484); + setState(474); subqueryExpression(); } } @@ -4258,15 +4190,15 @@ public T accept(ParseTreeVisitor visitor) { public final SubqueryExpressionContext subqueryExpression() throws RecognitionException { SubqueryExpressionContext _localctx = new SubqueryExpressionContext(_ctx, getState()); - enterRule(_localctx, 90, RULE_subqueryExpression); + enterRule(_localctx, 88, RULE_subqueryExpression); try { enterOuterAlt(_localctx, 1); { - setState(486); + setState(476); match(OPENING_BRACKET); - setState(487); + setState(477); query(0); - setState(488); + setState(478); match(CLOSING_BRACKET); } } @@ -4338,18 +4270,18 @@ public T accept(ParseTreeVisitor visitor) { public final ShowCommandContext showCommand() throws RecognitionException { ShowCommandContext _localctx = new ShowCommandContext(_ctx, getState()); - enterRule(_localctx, 92, RULE_showCommand); + enterRule(_localctx, 90, RULE_showCommand); try { - setState(494); + setState(484); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,47,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,46,_ctx) ) { case 1: _localctx = new ShowInfoContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(490); + setState(480); match(SHOW); - setState(491); + setState(481); match(INFO); } break; @@ -4357,9 +4289,9 @@ public final ShowCommandContext showCommand() throws RecognitionException { _localctx = new ShowFunctionsContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(492); + setState(482); match(SHOW); - setState(493); + setState(483); match(FUNCTIONS); } break; @@ -4425,68 +4357,68 @@ public T accept(ParseTreeVisitor visitor) { public final EnrichCommandContext enrichCommand() throws RecognitionException { EnrichCommandContext _localctx = new EnrichCommandContext(_ctx, getState()); - enterRule(_localctx, 94, RULE_enrichCommand); + enterRule(_localctx, 92, RULE_enrichCommand); int _la; try { int _alt; enterOuterAlt(_localctx, 1); { - setState(496); + setState(486); match(ENRICH); - setState(500); + setState(490); _errHandler.sync(this); _la = _input.LA(1); while (_la==OPENING_BRACKET) { { { - setState(497); + setState(487); setting(); } } - setState(502); + setState(492); _errHandler.sync(this); _la = _input.LA(1); } - setState(503); + setState(493); 
((EnrichCommandContext)_localctx).policyName = match(ENRICH_POLICY_NAME); - setState(506); + setState(496); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,49,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,48,_ctx) ) { case 1: { - setState(504); + setState(494); match(ON); - setState(505); + setState(495); ((EnrichCommandContext)_localctx).matchField = qualifiedNamePattern(); } break; } - setState(517); + setState(507); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,51,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,50,_ctx) ) { case 1: { - setState(508); + setState(498); match(WITH); - setState(509); + setState(499); enrichWithClause(); - setState(514); + setState(504); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,50,_ctx); + _alt = getInterpreter().adaptivePredict(_input,49,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(510); + setState(500); match(COMMA); - setState(511); + setState(501); enrichWithClause(); } } } - setState(516); + setState(506); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,50,_ctx); + _alt = getInterpreter().adaptivePredict(_input,49,_ctx); } } break; @@ -4537,23 +4469,23 @@ public T accept(ParseTreeVisitor visitor) { public final EnrichWithClauseContext enrichWithClause() throws RecognitionException { EnrichWithClauseContext _localctx = new EnrichWithClauseContext(_ctx, getState()); - enterRule(_localctx, 96, RULE_enrichWithClause); + enterRule(_localctx, 94, RULE_enrichWithClause); try { enterOuterAlt(_localctx, 1); { - setState(522); + setState(512); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,52,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,51,_ctx) ) { case 1: { - setState(519); + setState(509); ((EnrichWithClauseContext)_localctx).newName = qualifiedNamePattern(); - setState(520); + setState(510); match(ASSIGN); } break; } - setState(524); + setState(514); ((EnrichWithClauseContext)_localctx).enrichField = qualifiedNamePattern(); } } @@ -4601,19 +4533,19 @@ public T accept(ParseTreeVisitor visitor) { public final SettingContext setting() throws RecognitionException { SettingContext _localctx = new SettingContext(_ctx, getState()); - enterRule(_localctx, 98, RULE_setting); + enterRule(_localctx, 96, RULE_setting); try { enterOuterAlt(_localctx, 1); { - setState(526); + setState(516); match(OPENING_BRACKET); - setState(527); + setState(517); ((SettingContext)_localctx).name = match(SETTING); - setState(528); + setState(518); match(COLON); - setState(529); + setState(519); ((SettingContext)_localctx).value = match(SETTING); - setState(530); + setState(520); match(CLOSING_BRACKET); } } @@ -4666,7 +4598,7 @@ private boolean operatorExpression_sempred(OperatorExpressionContext _localctx, } public static final String _serializedATN = - "\u0004\u0001h\u0215\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001\u0002"+ + "\u0004\u0001h\u020b\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001\u0002"+ "\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004\u0002"+ "\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007\u0002"+ "\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b\u0002"+ @@ -4679,335 +4611,329 @@ private boolean operatorExpression_sempred(OperatorExpressionContext _localctx, "\u0002\u001f\u0007\u001f\u0002 \u0007 \u0002!\u0007!\u0002\"\u0007\"\u0002"+ 
"#\u0007#\u0002$\u0007$\u0002%\u0007%\u0002&\u0007&\u0002\'\u0007\'\u0002"+ "(\u0007(\u0002)\u0007)\u0002*\u0007*\u0002+\u0007+\u0002,\u0007,\u0002"+ - "-\u0007-\u0002.\u0007.\u0002/\u0007/\u00020\u00070\u00021\u00071\u0001"+ - "\u0000\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ - "\u0001\u0001\u0001\u0001\u0001\u0005\u0001n\b\u0001\n\u0001\f\u0001q\t"+ - "\u0001\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0003\u0002w\b"+ - "\u0002\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001"+ - "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001"+ - "\u0003\u0001\u0003\u0003\u0003\u0086\b\u0003\u0001\u0004\u0001\u0004\u0001"+ - "\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ - "\u0005\u0001\u0005\u0003\u0005\u0092\b\u0005\u0001\u0005\u0001\u0005\u0001"+ - "\u0005\u0001\u0005\u0001\u0005\u0005\u0005\u0099\b\u0005\n\u0005\f\u0005"+ - "\u009c\t\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005"+ - "\u0003\u0005\u00a3\b\u0005\u0001\u0005\u0001\u0005\u0003\u0005\u00a7\b"+ - "\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ - "\u0005\u0005\u0005\u00af\b\u0005\n\u0005\f\u0005\u00b2\t\u0005\u0001\u0006"+ - "\u0001\u0006\u0003\u0006\u00b6\b\u0006\u0001\u0006\u0001\u0006\u0001\u0006"+ - "\u0001\u0006\u0001\u0006\u0003\u0006\u00bd\b\u0006\u0001\u0006\u0001\u0006"+ - "\u0001\u0006\u0003\u0006\u00c2\b\u0006\u0001\u0007\u0001\u0007\u0001\u0007"+ - "\u0001\u0007\u0001\u0007\u0003\u0007\u00c9\b\u0007\u0001\b\u0001\b\u0001"+ - "\b\u0001\b\u0003\b\u00cf\b\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001"+ - "\b\u0005\b\u00d7\b\b\n\b\f\b\u00da\t\b\u0001\t\u0001\t\u0001\t\u0001\t"+ - "\u0001\t\u0001\t\u0001\t\u0003\t\u00e3\b\t\u0001\n\u0001\n\u0001\n\u0001"+ - "\n\u0001\n\u0001\n\u0005\n\u00eb\b\n\n\n\f\n\u00ee\t\n\u0003\n\u00f0\b"+ - "\n\u0001\n\u0001\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\f\u0001\f"+ - "\u0001\f\u0005\f\u00fa\b\f\n\f\f\f\u00fd\t\f\u0001\r\u0001\r\u0001\r\u0001"+ - "\r\u0001\r\u0003\r\u0104\b\r\u0001\u000e\u0001\u000e\u0001\u000e\u0001"+ - "\u000e\u0005\u000e\u010a\b\u000e\n\u000e\f\u000e\u010d\t\u000e\u0001\u000e"+ - "\u0003\u000e\u0110\b\u000e\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f"+ - "\u0001\u000f\u0005\u000f\u0117\b\u000f\n\u000f\f\u000f\u011a\t\u000f\u0001"+ - "\u000f\u0001\u000f\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0011\u0001"+ - "\u0011\u0003\u0011\u0123\b\u0011\u0001\u0011\u0001\u0011\u0003\u0011\u0127"+ - "\b\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u012d"+ - "\b\u0012\u0001\u0013\u0001\u0013\u0001\u0013\u0005\u0013\u0132\b\u0013"+ - "\n\u0013\f\u0013\u0135\t\u0013\u0001\u0014\u0001\u0014\u0001\u0015\u0001"+ - "\u0015\u0001\u0015\u0005\u0015\u013c\b\u0015\n\u0015\f\u0015\u013f\t\u0015"+ - "\u0001\u0016\u0001\u0016\u0001\u0016\u0005\u0016\u0144\b\u0016\n\u0016"+ - "\f\u0016\u0147\t\u0016\u0001\u0017\u0001\u0017\u0001\u0018\u0001\u0018"+ - "\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019"+ - "\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019"+ - "\u0001\u0019\u0005\u0019\u015a\b\u0019\n\u0019\f\u0019\u015d\t\u0019\u0001"+ - "\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0005"+ - "\u0019\u0165\b\u0019\n\u0019\f\u0019\u0168\t\u0019\u0001\u0019\u0001\u0019"+ - "\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0005\u0019\u0170\b\u0019"+ - "\n\u0019\f\u0019\u0173\t\u0019\u0001\u0019\u0001\u0019\u0003\u0019\u0177"+ - 
"\b\u0019\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001b\u0001\u001b\u0001"+ - "\u001b\u0001\u001b\u0005\u001b\u0180\b\u001b\n\u001b\f\u001b\u0183\t\u001b"+ - "\u0001\u001c\u0001\u001c\u0003\u001c\u0187\b\u001c\u0001\u001c\u0001\u001c"+ - "\u0003\u001c\u018b\b\u001c\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001d"+ - "\u0005\u001d\u0191\b\u001d\n\u001d\f\u001d\u0194\t\u001d\u0001\u001d\u0001"+ - "\u001d\u0001\u001d\u0001\u001d\u0005\u001d\u019a\b\u001d\n\u001d\f\u001d"+ - "\u019d\t\u001d\u0003\u001d\u019f\b\u001d\u0001\u001e\u0001\u001e\u0001"+ - "\u001e\u0001\u001e\u0005\u001e\u01a5\b\u001e\n\u001e\f\u001e\u01a8\t\u001e"+ - "\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0005\u001f\u01ae\b\u001f"+ - "\n\u001f\f\u001f\u01b1\t\u001f\u0001 \u0001 \u0001 \u0001 \u0001!\u0001"+ - "!\u0001!\u0001!\u0003!\u01bb\b!\u0001\"\u0001\"\u0001\"\u0001\"\u0001"+ - "#\u0001#\u0001#\u0001$\u0001$\u0001$\u0005$\u01c7\b$\n$\f$\u01ca\t$\u0001"+ - "%\u0001%\u0001%\u0001%\u0001&\u0001&\u0001\'\u0001\'\u0003\'\u01d4\b\'"+ - "\u0001(\u0003(\u01d7\b(\u0001(\u0001(\u0001)\u0003)\u01dc\b)\u0001)\u0001"+ - ")\u0001*\u0001*\u0001+\u0001+\u0001,\u0001,\u0001,\u0001-\u0001-\u0001"+ - "-\u0001-\u0001.\u0001.\u0001.\u0001.\u0003.\u01ef\b.\u0001/\u0001/\u0005"+ - "/\u01f3\b/\n/\f/\u01f6\t/\u0001/\u0001/\u0001/\u0003/\u01fb\b/\u0001/"+ - "\u0001/\u0001/\u0001/\u0005/\u0201\b/\n/\f/\u0204\t/\u0003/\u0206\b/\u0001"+ - "0\u00010\u00010\u00030\u020b\b0\u00010\u00010\u00011\u00011\u00011\u0001"+ - "1\u00011\u00011\u00011\u0000\u0003\u0002\n\u00102\u0000\u0002\u0004\u0006"+ - "\b\n\f\u000e\u0010\u0012\u0014\u0016\u0018\u001a\u001c\u001e \"$&(*,."+ - "02468:<>@BDFHJLNPRTVXZ\\^`b\u0000\t\u0001\u0000:;\u0001\u0000<>\u0002"+ - "\u0000BBGG\u0001\u0000AB\u0002\u0000BBKK\u0002\u0000 ##\u0001\u0000&"+ - "\'\u0002\u0000%%33\u0001\u000049\u0232\u0000d\u0001\u0000\u0000\u0000"+ - "\u0002g\u0001\u0000\u0000\u0000\u0004v\u0001\u0000\u0000\u0000\u0006\u0085"+ - "\u0001\u0000\u0000\u0000\b\u0087\u0001\u0000\u0000\u0000\n\u00a6\u0001"+ - "\u0000\u0000\u0000\f\u00c1\u0001\u0000\u0000\u0000\u000e\u00c8\u0001\u0000"+ - "\u0000\u0000\u0010\u00ce\u0001\u0000\u0000\u0000\u0012\u00e2\u0001\u0000"+ - "\u0000\u0000\u0014\u00e4\u0001\u0000\u0000\u0000\u0016\u00f3\u0001\u0000"+ - "\u0000\u0000\u0018\u00f6\u0001\u0000\u0000\u0000\u001a\u0103\u0001\u0000"+ - "\u0000\u0000\u001c\u0105\u0001\u0000\u0000\u0000\u001e\u0111\u0001\u0000"+ - "\u0000\u0000 \u011d\u0001\u0000\u0000\u0000\"\u0120\u0001\u0000\u0000"+ - "\u0000$\u0128\u0001\u0000\u0000\u0000&\u012e\u0001\u0000\u0000\u0000("+ - "\u0136\u0001\u0000\u0000\u0000*\u0138\u0001\u0000\u0000\u0000,\u0140\u0001"+ - "\u0000\u0000\u0000.\u0148\u0001\u0000\u0000\u00000\u014a\u0001\u0000\u0000"+ - "\u00002\u0176\u0001\u0000\u0000\u00004\u0178\u0001\u0000\u0000\u00006"+ - "\u017b\u0001\u0000\u0000\u00008\u0184\u0001\u0000\u0000\u0000:\u019e\u0001"+ - "\u0000\u0000\u0000<\u01a0\u0001\u0000\u0000\u0000>\u01a9\u0001\u0000\u0000"+ - "\u0000@\u01b2\u0001\u0000\u0000\u0000B\u01b6\u0001\u0000\u0000\u0000D"+ - "\u01bc\u0001\u0000\u0000\u0000F\u01c0\u0001\u0000\u0000\u0000H\u01c3\u0001"+ - "\u0000\u0000\u0000J\u01cb\u0001\u0000\u0000\u0000L\u01cf\u0001\u0000\u0000"+ - "\u0000N\u01d3\u0001\u0000\u0000\u0000P\u01d6\u0001\u0000\u0000\u0000R"+ - "\u01db\u0001\u0000\u0000\u0000T\u01df\u0001\u0000\u0000\u0000V\u01e1\u0001"+ - "\u0000\u0000\u0000X\u01e3\u0001\u0000\u0000\u0000Z\u01e6\u0001\u0000\u0000"+ - "\u0000\\\u01ee\u0001\u0000\u0000\u0000^\u01f0\u0001\u0000\u0000\u0000"+ - 
"`\u020a\u0001\u0000\u0000\u0000b\u020e\u0001\u0000\u0000\u0000de\u0003"+ - "\u0002\u0001\u0000ef\u0005\u0000\u0000\u0001f\u0001\u0001\u0000\u0000"+ - "\u0000gh\u0006\u0001\uffff\uffff\u0000hi\u0003\u0004\u0002\u0000io\u0001"+ - "\u0000\u0000\u0000jk\n\u0001\u0000\u0000kl\u0005\u001a\u0000\u0000ln\u0003"+ - "\u0006\u0003\u0000mj\u0001\u0000\u0000\u0000nq\u0001\u0000\u0000\u0000"+ - "om\u0001\u0000\u0000\u0000op\u0001\u0000\u0000\u0000p\u0003\u0001\u0000"+ - "\u0000\u0000qo\u0001\u0000\u0000\u0000rw\u0003X,\u0000sw\u0003\u001c\u000e"+ - "\u0000tw\u0003\u0016\u000b\u0000uw\u0003\\.\u0000vr\u0001\u0000\u0000"+ - "\u0000vs\u0001\u0000\u0000\u0000vt\u0001\u0000\u0000\u0000vu\u0001\u0000"+ - "\u0000\u0000w\u0005\u0001\u0000\u0000\u0000x\u0086\u0003 \u0010\u0000"+ - "y\u0086\u0003$\u0012\u0000z\u0086\u00034\u001a\u0000{\u0086\u0003:\u001d"+ - "\u0000|\u0086\u00036\u001b\u0000}\u0086\u0003\"\u0011\u0000~\u0086\u0003"+ - "\b\u0004\u0000\u007f\u0086\u0003<\u001e\u0000\u0080\u0086\u0003>\u001f"+ - "\u0000\u0081\u0086\u0003B!\u0000\u0082\u0086\u0003D\"\u0000\u0083\u0086"+ - "\u0003^/\u0000\u0084\u0086\u0003F#\u0000\u0085x\u0001\u0000\u0000\u0000"+ - "\u0085y\u0001\u0000\u0000\u0000\u0085z\u0001\u0000\u0000\u0000\u0085{"+ - "\u0001\u0000\u0000\u0000\u0085|\u0001\u0000\u0000\u0000\u0085}\u0001\u0000"+ - "\u0000\u0000\u0085~\u0001\u0000\u0000\u0000\u0085\u007f\u0001\u0000\u0000"+ - "\u0000\u0085\u0080\u0001\u0000\u0000\u0000\u0085\u0081\u0001\u0000\u0000"+ - "\u0000\u0085\u0082\u0001\u0000\u0000\u0000\u0085\u0083\u0001\u0000\u0000"+ - "\u0000\u0085\u0084\u0001\u0000\u0000\u0000\u0086\u0007\u0001\u0000\u0000"+ - "\u0000\u0087\u0088\u0005\u0012\u0000\u0000\u0088\u0089\u0003\n\u0005\u0000"+ - "\u0089\t\u0001\u0000\u0000\u0000\u008a\u008b\u0006\u0005\uffff\uffff\u0000"+ - "\u008b\u008c\u0005,\u0000\u0000\u008c\u00a7\u0003\n\u0005\u0007\u008d"+ - "\u00a7\u0003\u000e\u0007\u0000\u008e\u00a7\u0003\f\u0006\u0000\u008f\u0091"+ - "\u0003\u000e\u0007\u0000\u0090\u0092\u0005,\u0000\u0000\u0091\u0090\u0001"+ - "\u0000\u0000\u0000\u0091\u0092\u0001\u0000\u0000\u0000\u0092\u0093\u0001"+ - "\u0000\u0000\u0000\u0093\u0094\u0005)\u0000\u0000\u0094\u0095\u0005(\u0000"+ - "\u0000\u0095\u009a\u0003\u000e\u0007\u0000\u0096\u0097\u0005\"\u0000\u0000"+ - "\u0097\u0099\u0003\u000e\u0007\u0000\u0098\u0096\u0001\u0000\u0000\u0000"+ - "\u0099\u009c\u0001\u0000\u0000\u0000\u009a\u0098\u0001\u0000\u0000\u0000"+ - "\u009a\u009b\u0001\u0000\u0000\u0000\u009b\u009d\u0001\u0000\u0000\u0000"+ - "\u009c\u009a\u0001\u0000\u0000\u0000\u009d\u009e\u00052\u0000\u0000\u009e"+ - "\u00a7\u0001\u0000\u0000\u0000\u009f\u00a0\u0003\u000e\u0007\u0000\u00a0"+ - "\u00a2\u0005*\u0000\u0000\u00a1\u00a3\u0005,\u0000\u0000\u00a2\u00a1\u0001"+ - "\u0000\u0000\u0000\u00a2\u00a3\u0001\u0000\u0000\u0000\u00a3\u00a4\u0001"+ - "\u0000\u0000\u0000\u00a4\u00a5\u0005-\u0000\u0000\u00a5\u00a7\u0001\u0000"+ - "\u0000\u0000\u00a6\u008a\u0001\u0000\u0000\u0000\u00a6\u008d\u0001\u0000"+ - "\u0000\u0000\u00a6\u008e\u0001\u0000\u0000\u0000\u00a6\u008f\u0001\u0000"+ - "\u0000\u0000\u00a6\u009f\u0001\u0000\u0000\u0000\u00a7\u00b0\u0001\u0000"+ - "\u0000\u0000\u00a8\u00a9\n\u0004\u0000\u0000\u00a9\u00aa\u0005\u001f\u0000"+ - "\u0000\u00aa\u00af\u0003\n\u0005\u0005\u00ab\u00ac\n\u0003\u0000\u0000"+ - "\u00ac\u00ad\u0005/\u0000\u0000\u00ad\u00af\u0003\n\u0005\u0004\u00ae"+ - "\u00a8\u0001\u0000\u0000\u0000\u00ae\u00ab\u0001\u0000\u0000\u0000\u00af"+ - "\u00b2\u0001\u0000\u0000\u0000\u00b0\u00ae\u0001\u0000\u0000\u0000\u00b0"+ - 
"\u00b1\u0001\u0000\u0000\u0000\u00b1\u000b\u0001\u0000\u0000\u0000\u00b2"+ - "\u00b0\u0001\u0000\u0000\u0000\u00b3\u00b5\u0003\u000e\u0007\u0000\u00b4"+ - "\u00b6\u0005,\u0000\u0000\u00b5\u00b4\u0001\u0000\u0000\u0000\u00b5\u00b6"+ - "\u0001\u0000\u0000\u0000\u00b6\u00b7\u0001\u0000\u0000\u0000\u00b7\u00b8"+ - "\u0005+\u0000\u0000\u00b8\u00b9\u0003T*\u0000\u00b9\u00c2\u0001\u0000"+ - "\u0000\u0000\u00ba\u00bc\u0003\u000e\u0007\u0000\u00bb\u00bd\u0005,\u0000"+ - "\u0000\u00bc\u00bb\u0001\u0000\u0000\u0000\u00bc\u00bd\u0001\u0000\u0000"+ - "\u0000\u00bd\u00be\u0001\u0000\u0000\u0000\u00be\u00bf\u00051\u0000\u0000"+ - "\u00bf\u00c0\u0003T*\u0000\u00c0\u00c2\u0001\u0000\u0000\u0000\u00c1\u00b3"+ - "\u0001\u0000\u0000\u0000\u00c1\u00ba\u0001\u0000\u0000\u0000\u00c2\r\u0001"+ - "\u0000\u0000\u0000\u00c3\u00c9\u0003\u0010\b\u0000\u00c4\u00c5\u0003\u0010"+ - "\b\u0000\u00c5\u00c6\u0003V+\u0000\u00c6\u00c7\u0003\u0010\b\u0000\u00c7"+ - "\u00c9\u0001\u0000\u0000\u0000\u00c8\u00c3\u0001\u0000\u0000\u0000\u00c8"+ - "\u00c4\u0001\u0000\u0000\u0000\u00c9\u000f\u0001\u0000\u0000\u0000\u00ca"+ - "\u00cb\u0006\b\uffff\uffff\u0000\u00cb\u00cf\u0003\u0012\t\u0000\u00cc"+ - "\u00cd\u0007\u0000\u0000\u0000\u00cd\u00cf\u0003\u0010\b\u0003\u00ce\u00ca"+ - "\u0001\u0000\u0000\u0000\u00ce\u00cc\u0001\u0000\u0000\u0000\u00cf\u00d8"+ - "\u0001\u0000\u0000\u0000\u00d0\u00d1\n\u0002\u0000\u0000\u00d1\u00d2\u0007"+ - "\u0001\u0000\u0000\u00d2\u00d7\u0003\u0010\b\u0003\u00d3\u00d4\n\u0001"+ - "\u0000\u0000\u00d4\u00d5\u0007\u0000\u0000\u0000\u00d5\u00d7\u0003\u0010"+ - "\b\u0002\u00d6\u00d0\u0001\u0000\u0000\u0000\u00d6\u00d3\u0001\u0000\u0000"+ - "\u0000\u00d7\u00da\u0001\u0000\u0000\u0000\u00d8\u00d6\u0001\u0000\u0000"+ - "\u0000\u00d8\u00d9\u0001\u0000\u0000\u0000\u00d9\u0011\u0001\u0000\u0000"+ - "\u0000\u00da\u00d8\u0001\u0000\u0000\u0000\u00db\u00e3\u00032\u0019\u0000"+ - "\u00dc\u00e3\u0003*\u0015\u0000\u00dd\u00e3\u0003\u0014\n\u0000\u00de"+ - "\u00df\u0005(\u0000\u0000\u00df\u00e0\u0003\n\u0005\u0000\u00e0\u00e1"+ - "\u00052\u0000\u0000\u00e1\u00e3\u0001\u0000\u0000\u0000\u00e2\u00db\u0001"+ - "\u0000\u0000\u0000\u00e2\u00dc\u0001\u0000\u0000\u0000\u00e2\u00dd\u0001"+ - "\u0000\u0000\u0000\u00e2\u00de\u0001\u0000\u0000\u0000\u00e3\u0013\u0001"+ - "\u0000\u0000\u0000\u00e4\u00e5\u0003.\u0017\u0000\u00e5\u00ef\u0005(\u0000"+ - "\u0000\u00e6\u00f0\u0005<\u0000\u0000\u00e7\u00ec\u0003\n\u0005\u0000"+ - "\u00e8\u00e9\u0005\"\u0000\u0000\u00e9\u00eb\u0003\n\u0005\u0000\u00ea"+ - "\u00e8\u0001\u0000\u0000\u0000\u00eb\u00ee\u0001\u0000\u0000\u0000\u00ec"+ - "\u00ea\u0001\u0000\u0000\u0000\u00ec\u00ed\u0001\u0000\u0000\u0000\u00ed"+ - "\u00f0\u0001\u0000\u0000\u0000\u00ee\u00ec\u0001\u0000\u0000\u0000\u00ef"+ - "\u00e6\u0001\u0000\u0000\u0000\u00ef\u00e7\u0001\u0000\u0000\u0000\u00ef"+ - "\u00f0\u0001\u0000\u0000\u0000\u00f0\u00f1\u0001\u0000\u0000\u0000\u00f1"+ - "\u00f2\u00052\u0000\u0000\u00f2\u0015\u0001\u0000\u0000\u0000\u00f3\u00f4"+ - "\u0005\u000e\u0000\u0000\u00f4\u00f5\u0003\u0018\f\u0000\u00f5\u0017\u0001"+ - "\u0000\u0000\u0000\u00f6\u00fb\u0003\u001a\r\u0000\u00f7\u00f8\u0005\""+ - "\u0000\u0000\u00f8\u00fa\u0003\u001a\r\u0000\u00f9\u00f7\u0001\u0000\u0000"+ - "\u0000\u00fa\u00fd\u0001\u0000\u0000\u0000\u00fb\u00f9\u0001\u0000\u0000"+ - "\u0000\u00fb\u00fc\u0001\u0000\u0000\u0000\u00fc\u0019\u0001\u0000\u0000"+ - "\u0000\u00fd\u00fb\u0001\u0000\u0000\u0000\u00fe\u0104\u0003\n\u0005\u0000"+ - "\u00ff\u0100\u0003*\u0015\u0000\u0100\u0101\u0005!\u0000\u0000\u0101\u0102"+ - 
"\u0003\n\u0005\u0000\u0102\u0104\u0001\u0000\u0000\u0000\u0103\u00fe\u0001"+ - "\u0000\u0000\u0000\u0103\u00ff\u0001\u0000\u0000\u0000\u0104\u001b\u0001"+ - "\u0000\u0000\u0000\u0105\u0106\u0005\u0006\u0000\u0000\u0106\u010b\u0003"+ - "(\u0014\u0000\u0107\u0108\u0005\"\u0000\u0000\u0108\u010a\u0003(\u0014"+ - "\u0000\u0109\u0107\u0001\u0000\u0000\u0000\u010a\u010d\u0001\u0000\u0000"+ - "\u0000\u010b\u0109\u0001\u0000\u0000\u0000\u010b\u010c\u0001\u0000\u0000"+ - "\u0000\u010c\u010f\u0001\u0000\u0000\u0000\u010d\u010b\u0001\u0000\u0000"+ - "\u0000\u010e\u0110\u0003\u001e\u000f\u0000\u010f\u010e\u0001\u0000\u0000"+ - "\u0000\u010f\u0110\u0001\u0000\u0000\u0000\u0110\u001d\u0001\u0000\u0000"+ - "\u0000\u0111\u0112\u0005?\u0000\u0000\u0112\u0113\u0005F\u0000\u0000\u0113"+ - "\u0118\u0003(\u0014\u0000\u0114\u0115\u0005\"\u0000\u0000\u0115\u0117"+ - "\u0003(\u0014\u0000\u0116\u0114\u0001\u0000\u0000\u0000\u0117\u011a\u0001"+ - "\u0000\u0000\u0000\u0118\u0116\u0001\u0000\u0000\u0000\u0118\u0119\u0001"+ - "\u0000\u0000\u0000\u0119\u011b\u0001\u0000\u0000\u0000\u011a\u0118\u0001"+ - "\u0000\u0000\u0000\u011b\u011c\u0005@\u0000\u0000\u011c\u001f\u0001\u0000"+ - "\u0000\u0000\u011d\u011e\u0005\u0004\u0000\u0000\u011e\u011f\u0003\u0018"+ - "\f\u0000\u011f!\u0001\u0000\u0000\u0000\u0120\u0122\u0005\u0011\u0000"+ - "\u0000\u0121\u0123\u0003\u0018\f\u0000\u0122\u0121\u0001\u0000\u0000\u0000"+ - "\u0122\u0123\u0001\u0000\u0000\u0000\u0123\u0126\u0001\u0000\u0000\u0000"+ - "\u0124\u0125\u0005\u001e\u0000\u0000\u0125\u0127\u0003&\u0013\u0000\u0126"+ - "\u0124\u0001\u0000\u0000\u0000\u0126\u0127\u0001\u0000\u0000\u0000\u0127"+ - "#\u0001\u0000\u0000\u0000\u0128\u0129\u0005\b\u0000\u0000\u0129\u012c"+ - "\u0003\u0018\f\u0000\u012a\u012b\u0005\u001e\u0000\u0000\u012b\u012d\u0003"+ - "&\u0013\u0000\u012c\u012a\u0001\u0000\u0000\u0000\u012c\u012d\u0001\u0000"+ - "\u0000\u0000\u012d%\u0001\u0000\u0000\u0000\u012e\u0133\u0003*\u0015\u0000"+ - "\u012f\u0130\u0005\"\u0000\u0000\u0130\u0132\u0003*\u0015\u0000\u0131"+ - "\u012f\u0001\u0000\u0000\u0000\u0132\u0135\u0001\u0000\u0000\u0000\u0133"+ - "\u0131\u0001\u0000\u0000\u0000\u0133\u0134\u0001\u0000\u0000\u0000\u0134"+ - "\'\u0001\u0000\u0000\u0000\u0135\u0133\u0001\u0000\u0000\u0000\u0136\u0137"+ - "\u0007\u0002\u0000\u0000\u0137)\u0001\u0000\u0000\u0000\u0138\u013d\u0003"+ - ".\u0017\u0000\u0139\u013a\u0005$\u0000\u0000\u013a\u013c\u0003.\u0017"+ - "\u0000\u013b\u0139\u0001\u0000\u0000\u0000\u013c\u013f\u0001\u0000\u0000"+ - "\u0000\u013d\u013b\u0001\u0000\u0000\u0000\u013d\u013e\u0001\u0000\u0000"+ - "\u0000\u013e+\u0001\u0000\u0000\u0000\u013f\u013d\u0001\u0000\u0000\u0000"+ - "\u0140\u0145\u00030\u0018\u0000\u0141\u0142\u0005$\u0000\u0000\u0142\u0144"+ - "\u00030\u0018\u0000\u0143\u0141\u0001\u0000\u0000\u0000\u0144\u0147\u0001"+ - "\u0000\u0000\u0000\u0145\u0143\u0001\u0000\u0000\u0000\u0145\u0146\u0001"+ - "\u0000\u0000\u0000\u0146-\u0001\u0000\u0000\u0000\u0147\u0145\u0001\u0000"+ - "\u0000\u0000\u0148\u0149\u0007\u0003\u0000\u0000\u0149/\u0001\u0000\u0000"+ - "\u0000\u014a\u014b\u0007\u0004\u0000\u0000\u014b1\u0001\u0000\u0000\u0000"+ - "\u014c\u0177\u0005-\u0000\u0000\u014d\u014e\u0003R)\u0000\u014e\u014f"+ - "\u0005A\u0000\u0000\u014f\u0177\u0001\u0000\u0000\u0000\u0150\u0177\u0003"+ - "P(\u0000\u0151\u0177\u0003R)\u0000\u0152\u0177\u0003L&\u0000\u0153\u0177"+ - "\u00050\u0000\u0000\u0154\u0177\u0003T*\u0000\u0155\u0156\u0005?\u0000"+ - "\u0000\u0156\u015b\u0003N\'\u0000\u0157\u0158\u0005\"\u0000\u0000\u0158"+ - 
"\u015a\u0003N\'\u0000\u0159\u0157\u0001\u0000\u0000\u0000\u015a\u015d"+ - "\u0001\u0000\u0000\u0000\u015b\u0159\u0001\u0000\u0000\u0000\u015b\u015c"+ - "\u0001\u0000\u0000\u0000\u015c\u015e\u0001\u0000\u0000\u0000\u015d\u015b"+ - "\u0001\u0000\u0000\u0000\u015e\u015f\u0005@\u0000\u0000\u015f\u0177\u0001"+ - "\u0000\u0000\u0000\u0160\u0161\u0005?\u0000\u0000\u0161\u0166\u0003L&"+ - "\u0000\u0162\u0163\u0005\"\u0000\u0000\u0163\u0165\u0003L&\u0000\u0164"+ - "\u0162\u0001\u0000\u0000\u0000\u0165\u0168\u0001\u0000\u0000\u0000\u0166"+ - "\u0164\u0001\u0000\u0000\u0000\u0166\u0167\u0001\u0000\u0000\u0000\u0167"+ - "\u0169\u0001\u0000\u0000\u0000\u0168\u0166\u0001\u0000\u0000\u0000\u0169"+ - "\u016a\u0005@\u0000\u0000\u016a\u0177\u0001\u0000\u0000\u0000\u016b\u016c"+ - "\u0005?\u0000\u0000\u016c\u0171\u0003T*\u0000\u016d\u016e\u0005\"\u0000"+ - "\u0000\u016e\u0170\u0003T*\u0000\u016f\u016d\u0001\u0000\u0000\u0000\u0170"+ - "\u0173\u0001\u0000\u0000\u0000\u0171\u016f\u0001\u0000\u0000\u0000\u0171"+ - "\u0172\u0001\u0000\u0000\u0000\u0172\u0174\u0001\u0000\u0000\u0000\u0173"+ - "\u0171\u0001\u0000\u0000\u0000\u0174\u0175\u0005@\u0000\u0000\u0175\u0177"+ - "\u0001\u0000\u0000\u0000\u0176\u014c\u0001\u0000\u0000\u0000\u0176\u014d"+ - "\u0001\u0000\u0000\u0000\u0176\u0150\u0001\u0000\u0000\u0000\u0176\u0151"+ - "\u0001\u0000\u0000\u0000\u0176\u0152\u0001\u0000\u0000\u0000\u0176\u0153"+ - "\u0001\u0000\u0000\u0000\u0176\u0154\u0001\u0000\u0000\u0000\u0176\u0155"+ - "\u0001\u0000\u0000\u0000\u0176\u0160\u0001\u0000\u0000\u0000\u0176\u016b"+ - "\u0001\u0000\u0000\u0000\u01773\u0001\u0000\u0000\u0000\u0178\u0179\u0005"+ - "\n\u0000\u0000\u0179\u017a\u0005\u001c\u0000\u0000\u017a5\u0001\u0000"+ - "\u0000\u0000\u017b\u017c\u0005\u0010\u0000\u0000\u017c\u0181\u00038\u001c"+ - "\u0000\u017d\u017e\u0005\"\u0000\u0000\u017e\u0180\u00038\u001c\u0000"+ - "\u017f\u017d\u0001\u0000\u0000\u0000\u0180\u0183\u0001\u0000\u0000\u0000"+ - "\u0181\u017f\u0001\u0000\u0000\u0000\u0181\u0182\u0001\u0000\u0000\u0000"+ - "\u01827\u0001\u0000\u0000\u0000\u0183\u0181\u0001\u0000\u0000\u0000\u0184"+ - "\u0186\u0003\n\u0005\u0000\u0185\u0187\u0007\u0005\u0000\u0000\u0186\u0185"+ - "\u0001\u0000\u0000\u0000\u0186\u0187\u0001\u0000\u0000\u0000\u0187\u018a"+ - "\u0001\u0000\u0000\u0000\u0188\u0189\u0005.\u0000\u0000\u0189\u018b\u0007"+ - "\u0006\u0000\u0000\u018a\u0188\u0001\u0000\u0000\u0000\u018a\u018b\u0001"+ - "\u0000\u0000\u0000\u018b9\u0001\u0000\u0000\u0000\u018c\u018d\u0005\t"+ - "\u0000\u0000\u018d\u0192\u0003,\u0016\u0000\u018e\u018f\u0005\"\u0000"+ - "\u0000\u018f\u0191\u0003,\u0016\u0000\u0190\u018e\u0001\u0000\u0000\u0000"+ - "\u0191\u0194\u0001\u0000\u0000\u0000\u0192\u0190\u0001\u0000\u0000\u0000"+ - "\u0192\u0193\u0001\u0000\u0000\u0000\u0193\u019f\u0001\u0000\u0000\u0000"+ - "\u0194\u0192\u0001\u0000\u0000\u0000\u0195\u0196\u0005\f\u0000\u0000\u0196"+ - "\u019b\u0003,\u0016\u0000\u0197\u0198\u0005\"\u0000\u0000\u0198\u019a"+ - "\u0003,\u0016\u0000\u0199\u0197\u0001\u0000\u0000\u0000\u019a\u019d\u0001"+ - "\u0000\u0000\u0000\u019b\u0199\u0001\u0000\u0000\u0000\u019b\u019c\u0001"+ - "\u0000\u0000\u0000\u019c\u019f\u0001\u0000\u0000\u0000\u019d\u019b\u0001"+ - "\u0000\u0000\u0000\u019e\u018c\u0001\u0000\u0000\u0000\u019e\u0195\u0001"+ - "\u0000\u0000\u0000\u019f;\u0001\u0000\u0000\u0000\u01a0\u01a1\u0005\u0002"+ - "\u0000\u0000\u01a1\u01a6\u0003,\u0016\u0000\u01a2\u01a3\u0005\"\u0000"+ - "\u0000\u01a3\u01a5\u0003,\u0016\u0000\u01a4\u01a2\u0001\u0000\u0000\u0000"+ - 
"\u01a5\u01a8\u0001\u0000\u0000\u0000\u01a6\u01a4\u0001\u0000\u0000\u0000"+ - "\u01a6\u01a7\u0001\u0000\u0000\u0000\u01a7=\u0001\u0000\u0000\u0000\u01a8"+ - "\u01a6\u0001\u0000\u0000\u0000\u01a9\u01aa\u0005\r\u0000\u0000\u01aa\u01af"+ - "\u0003@ \u0000\u01ab\u01ac\u0005\"\u0000\u0000\u01ac\u01ae\u0003@ \u0000"+ - "\u01ad\u01ab\u0001\u0000\u0000\u0000\u01ae\u01b1\u0001\u0000\u0000\u0000"+ - "\u01af\u01ad\u0001\u0000\u0000\u0000\u01af\u01b0\u0001\u0000\u0000\u0000"+ - "\u01b0?\u0001\u0000\u0000\u0000\u01b1\u01af\u0001\u0000\u0000\u0000\u01b2"+ - "\u01b3\u0003,\u0016\u0000\u01b3\u01b4\u0005O\u0000\u0000\u01b4\u01b5\u0003"+ - ",\u0016\u0000\u01b5A\u0001\u0000\u0000\u0000\u01b6\u01b7\u0005\u0001\u0000"+ - "\u0000\u01b7\u01b8\u0003\u0012\t\u0000\u01b8\u01ba\u0003T*\u0000\u01b9"+ - "\u01bb\u0003H$\u0000\u01ba\u01b9\u0001\u0000\u0000\u0000\u01ba\u01bb\u0001"+ - "\u0000\u0000\u0000\u01bbC\u0001\u0000\u0000\u0000\u01bc\u01bd\u0005\u0007"+ - "\u0000\u0000\u01bd\u01be\u0003\u0012\t\u0000\u01be\u01bf\u0003T*\u0000"+ - "\u01bfE\u0001\u0000\u0000\u0000\u01c0\u01c1\u0005\u000b\u0000\u0000\u01c1"+ - "\u01c2\u0003*\u0015\u0000\u01c2G\u0001\u0000\u0000\u0000\u01c3\u01c8\u0003"+ - "J%\u0000\u01c4\u01c5\u0005\"\u0000\u0000\u01c5\u01c7\u0003J%\u0000\u01c6"+ - "\u01c4\u0001\u0000\u0000\u0000\u01c7\u01ca\u0001\u0000\u0000\u0000\u01c8"+ - "\u01c6\u0001\u0000\u0000\u0000\u01c8\u01c9\u0001\u0000\u0000\u0000\u01c9"+ - "I\u0001\u0000\u0000\u0000\u01ca\u01c8\u0001\u0000\u0000\u0000\u01cb\u01cc"+ - "\u0003.\u0017\u0000\u01cc\u01cd\u0005!\u0000\u0000\u01cd\u01ce\u00032"+ - "\u0019\u0000\u01ceK\u0001\u0000\u0000\u0000\u01cf\u01d0\u0007\u0007\u0000"+ - "\u0000\u01d0M\u0001\u0000\u0000\u0000\u01d1\u01d4\u0003P(\u0000\u01d2"+ - "\u01d4\u0003R)\u0000\u01d3\u01d1\u0001\u0000\u0000\u0000\u01d3\u01d2\u0001"+ - "\u0000\u0000\u0000\u01d4O\u0001\u0000\u0000\u0000\u01d5\u01d7\u0007\u0000"+ - "\u0000\u0000\u01d6\u01d5\u0001\u0000\u0000\u0000\u01d6\u01d7\u0001\u0000"+ - "\u0000\u0000\u01d7\u01d8\u0001\u0000\u0000\u0000\u01d8\u01d9\u0005\u001d"+ - "\u0000\u0000\u01d9Q\u0001\u0000\u0000\u0000\u01da\u01dc\u0007\u0000\u0000"+ - "\u0000\u01db\u01da\u0001\u0000\u0000\u0000\u01db\u01dc\u0001\u0000\u0000"+ - "\u0000\u01dc\u01dd\u0001\u0000\u0000\u0000\u01dd\u01de\u0005\u001c\u0000"+ - "\u0000\u01deS\u0001\u0000\u0000\u0000\u01df\u01e0\u0005\u001b\u0000\u0000"+ - "\u01e0U\u0001\u0000\u0000\u0000\u01e1\u01e2\u0007\b\u0000\u0000\u01e2"+ - "W\u0001\u0000\u0000\u0000\u01e3\u01e4\u0005\u0005\u0000\u0000\u01e4\u01e5"+ - "\u0003Z-\u0000\u01e5Y\u0001\u0000\u0000\u0000\u01e6\u01e7\u0005?\u0000"+ - "\u0000\u01e7\u01e8\u0003\u0002\u0001\u0000\u01e8\u01e9\u0005@\u0000\u0000"+ - "\u01e9[\u0001\u0000\u0000\u0000\u01ea\u01eb\u0005\u000f\u0000\u0000\u01eb"+ - "\u01ef\u0005_\u0000\u0000\u01ec\u01ed\u0005\u000f\u0000\u0000\u01ed\u01ef"+ - "\u0005`\u0000\u0000\u01ee\u01ea\u0001\u0000\u0000\u0000\u01ee\u01ec\u0001"+ - "\u0000\u0000\u0000\u01ef]\u0001\u0000\u0000\u0000\u01f0\u01f4\u0005\u0003"+ - "\u0000\u0000\u01f1\u01f3\u0003b1\u0000\u01f2\u01f1\u0001\u0000\u0000\u0000"+ - "\u01f3\u01f6\u0001\u0000\u0000\u0000\u01f4\u01f2\u0001\u0000\u0000\u0000"+ - "\u01f4\u01f5\u0001\u0000\u0000\u0000\u01f5\u01f7\u0001\u0000\u0000\u0000"+ - "\u01f6\u01f4\u0001\u0000\u0000\u0000\u01f7\u01fa\u0005U\u0000\u0000\u01f8"+ - "\u01f9\u0005S\u0000\u0000\u01f9\u01fb\u0003,\u0016\u0000\u01fa\u01f8\u0001"+ - "\u0000\u0000\u0000\u01fa\u01fb\u0001\u0000\u0000\u0000\u01fb\u0205\u0001"+ - "\u0000\u0000\u0000\u01fc\u01fd\u0005T\u0000\u0000\u01fd\u0202\u0003`0"+ - 
"\u0000\u01fe\u01ff\u0005\"\u0000\u0000\u01ff\u0201\u0003`0\u0000\u0200"+ - "\u01fe\u0001\u0000\u0000\u0000\u0201\u0204\u0001\u0000\u0000\u0000\u0202"+ - "\u0200\u0001\u0000\u0000\u0000\u0202\u0203\u0001\u0000\u0000\u0000\u0203"+ - "\u0206\u0001\u0000\u0000\u0000\u0204\u0202\u0001\u0000\u0000\u0000\u0205"+ - "\u01fc\u0001\u0000\u0000\u0000\u0205\u0206\u0001\u0000\u0000\u0000\u0206"+ - "_\u0001\u0000\u0000\u0000\u0207\u0208\u0003,\u0016\u0000\u0208\u0209\u0005"+ - "!\u0000\u0000\u0209\u020b\u0001\u0000\u0000\u0000\u020a\u0207\u0001\u0000"+ - "\u0000\u0000\u020a\u020b\u0001\u0000\u0000\u0000\u020b\u020c\u0001\u0000"+ - "\u0000\u0000\u020c\u020d\u0003,\u0016\u0000\u020da\u0001\u0000\u0000\u0000"+ - "\u020e\u020f\u0005?\u0000\u0000\u020f\u0210\u0005e\u0000\u0000\u0210\u0211"+ - "\u0005d\u0000\u0000\u0211\u0212\u0005e\u0000\u0000\u0212\u0213\u0005@"+ - "\u0000\u0000\u0213c\u0001\u0000\u0000\u00005ov\u0085\u0091\u009a\u00a2"+ - "\u00a6\u00ae\u00b0\u00b5\u00bc\u00c1\u00c8\u00ce\u00d6\u00d8\u00e2\u00ec"+ - "\u00ef\u00fb\u0103\u010b\u010f\u0118\u0122\u0126\u012c\u0133\u013d\u0145"+ - "\u015b\u0166\u0171\u0176\u0181\u0186\u018a\u0192\u019b\u019e\u01a6\u01af"+ - "\u01ba\u01c8\u01d3\u01d6\u01db\u01ee\u01f4\u01fa\u0202\u0205\u020a"; + "-\u0007-\u0002.\u0007.\u0002/\u0007/\u00020\u00070\u0001\u0000\u0001\u0000"+ + "\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ + "\u0001\u0001\u0005\u0001l\b\u0001\n\u0001\f\u0001o\t\u0001\u0001\u0002"+ + "\u0001\u0002\u0001\u0002\u0001\u0002\u0003\u0002u\b\u0002\u0001\u0003"+ + "\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003"+ + "\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003"+ + "\u0003\u0003\u0084\b\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0005"+ + "\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005"+ + "\u0003\u0005\u0090\b\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005"+ + "\u0001\u0005\u0005\u0005\u0097\b\u0005\n\u0005\f\u0005\u009a\t\u0005\u0001"+ + "\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0003\u0005\u00a1"+ + "\b\u0005\u0001\u0005\u0001\u0005\u0003\u0005\u00a5\b\u0005\u0001\u0005"+ + "\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0005\u0005"+ + "\u00ad\b\u0005\n\u0005\f\u0005\u00b0\t\u0005\u0001\u0006\u0001\u0006\u0003"+ + "\u0006\u00b4\b\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001"+ + "\u0006\u0003\u0006\u00bb\b\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0003"+ + "\u0006\u00c0\b\u0006\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001"+ + "\u0007\u0003\u0007\u00c7\b\u0007\u0001\b\u0001\b\u0001\b\u0001\b\u0003"+ + "\b\u00cd\b\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0005\b\u00d5"+ + "\b\b\n\b\f\b\u00d8\t\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t"+ + "\u0001\t\u0003\t\u00e1\b\t\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001"+ + "\n\u0005\n\u00e9\b\n\n\n\f\n\u00ec\t\n\u0003\n\u00ee\b\n\u0001\n\u0001"+ + "\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\f\u0001\f\u0001\f\u0005\f"+ + "\u00f8\b\f\n\f\f\f\u00fb\t\f\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0003"+ + "\r\u0102\b\r\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0005\u000e"+ + "\u0108\b\u000e\n\u000e\f\u000e\u010b\t\u000e\u0001\u000e\u0003\u000e\u010e"+ + "\b\u000e\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0005"+ + "\u000f\u0115\b\u000f\n\u000f\f\u000f\u0118\t\u000f\u0001\u000f\u0001\u000f"+ + "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0011\u0001\u0011\u0003\u0011"+ + 
"\u0121\b\u0011\u0001\u0011\u0001\u0011\u0003\u0011\u0125\b\u0011\u0001"+ + "\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u012b\b\u0012\u0001"+ + "\u0013\u0001\u0013\u0001\u0014\u0001\u0014\u0001\u0014\u0005\u0014\u0132"+ + "\b\u0014\n\u0014\f\u0014\u0135\t\u0014\u0001\u0015\u0001\u0015\u0001\u0015"+ + "\u0005\u0015\u013a\b\u0015\n\u0015\f\u0015\u013d\t\u0015\u0001\u0016\u0001"+ + "\u0016\u0001\u0017\u0001\u0017\u0001\u0018\u0001\u0018\u0001\u0018\u0001"+ + "\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001"+ + "\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0005\u0018\u0150\b\u0018\n"+ + "\u0018\f\u0018\u0153\t\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001"+ + "\u0018\u0001\u0018\u0001\u0018\u0005\u0018\u015b\b\u0018\n\u0018\f\u0018"+ + "\u015e\t\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018"+ + "\u0001\u0018\u0005\u0018\u0166\b\u0018\n\u0018\f\u0018\u0169\t\u0018\u0001"+ + "\u0018\u0001\u0018\u0003\u0018\u016d\b\u0018\u0001\u0019\u0001\u0019\u0001"+ + "\u0019\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0005\u001a\u0176"+ + "\b\u001a\n\u001a\f\u001a\u0179\t\u001a\u0001\u001b\u0001\u001b\u0003\u001b"+ + "\u017d\b\u001b\u0001\u001b\u0001\u001b\u0003\u001b\u0181\b\u001b\u0001"+ + "\u001c\u0001\u001c\u0001\u001c\u0001\u001c\u0005\u001c\u0187\b\u001c\n"+ + "\u001c\f\u001c\u018a\t\u001c\u0001\u001c\u0001\u001c\u0001\u001c\u0001"+ + "\u001c\u0005\u001c\u0190\b\u001c\n\u001c\f\u001c\u0193\t\u001c\u0003\u001c"+ + "\u0195\b\u001c\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0005\u001d"+ + "\u019b\b\u001d\n\u001d\f\u001d\u019e\t\u001d\u0001\u001e\u0001\u001e\u0001"+ + "\u001e\u0001\u001e\u0005\u001e\u01a4\b\u001e\n\u001e\f\u001e\u01a7\t\u001e"+ + "\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001 \u0001 \u0001 "+ + "\u0001 \u0003 \u01b1\b \u0001!\u0001!\u0001!\u0001!\u0001\"\u0001\"\u0001"+ + "\"\u0001#\u0001#\u0001#\u0005#\u01bd\b#\n#\f#\u01c0\t#\u0001$\u0001$\u0001"+ + "$\u0001$\u0001%\u0001%\u0001&\u0001&\u0003&\u01ca\b&\u0001\'\u0003\'\u01cd"+ + "\b\'\u0001\'\u0001\'\u0001(\u0003(\u01d2\b(\u0001(\u0001(\u0001)\u0001"+ + ")\u0001*\u0001*\u0001+\u0001+\u0001+\u0001,\u0001,\u0001,\u0001,\u0001"+ + "-\u0001-\u0001-\u0001-\u0003-\u01e5\b-\u0001.\u0001.\u0005.\u01e9\b.\n"+ + ".\f.\u01ec\t.\u0001.\u0001.\u0001.\u0003.\u01f1\b.\u0001.\u0001.\u0001"+ + ".\u0001.\u0005.\u01f7\b.\n.\f.\u01fa\t.\u0003.\u01fc\b.\u0001/\u0001/"+ + "\u0001/\u0003/\u0201\b/\u0001/\u0001/\u00010\u00010\u00010\u00010\u0001"+ + "0\u00010\u00010\u0000\u0003\u0002\n\u00101\u0000\u0002\u0004\u0006\b\n"+ + "\f\u000e\u0010\u0012\u0014\u0016\u0018\u001a\u001c\u001e \"$&(*,.0246"+ + "8:<>@BDFHJLNPRTVXZ\\^`\u0000\t\u0001\u0000:;\u0001\u0000<>\u0002\u0000"+ + "BBGG\u0001\u0000AB\u0002\u0000BBKK\u0002\u0000 ##\u0001\u0000&\'\u0002"+ + "\u0000%%33\u0001\u000049\u0228\u0000b\u0001\u0000\u0000\u0000\u0002e\u0001"+ + "\u0000\u0000\u0000\u0004t\u0001\u0000\u0000\u0000\u0006\u0083\u0001\u0000"+ + "\u0000\u0000\b\u0085\u0001\u0000\u0000\u0000\n\u00a4\u0001\u0000\u0000"+ + "\u0000\f\u00bf\u0001\u0000\u0000\u0000\u000e\u00c6\u0001\u0000\u0000\u0000"+ + "\u0010\u00cc\u0001\u0000\u0000\u0000\u0012\u00e0\u0001\u0000\u0000\u0000"+ + "\u0014\u00e2\u0001\u0000\u0000\u0000\u0016\u00f1\u0001\u0000\u0000\u0000"+ + "\u0018\u00f4\u0001\u0000\u0000\u0000\u001a\u0101\u0001\u0000\u0000\u0000"+ + "\u001c\u0103\u0001\u0000\u0000\u0000\u001e\u010f\u0001\u0000\u0000\u0000"+ + " \u011b\u0001\u0000\u0000\u0000\"\u011e\u0001\u0000\u0000\u0000$\u0126"+ + 
"\u0001\u0000\u0000\u0000&\u012c\u0001\u0000\u0000\u0000(\u012e\u0001\u0000"+ + "\u0000\u0000*\u0136\u0001\u0000\u0000\u0000,\u013e\u0001\u0000\u0000\u0000"+ + ".\u0140\u0001\u0000\u0000\u00000\u016c\u0001\u0000\u0000\u00002\u016e"+ + "\u0001\u0000\u0000\u00004\u0171\u0001\u0000\u0000\u00006\u017a\u0001\u0000"+ + "\u0000\u00008\u0194\u0001\u0000\u0000\u0000:\u0196\u0001\u0000\u0000\u0000"+ + "<\u019f\u0001\u0000\u0000\u0000>\u01a8\u0001\u0000\u0000\u0000@\u01ac"+ + "\u0001\u0000\u0000\u0000B\u01b2\u0001\u0000\u0000\u0000D\u01b6\u0001\u0000"+ + "\u0000\u0000F\u01b9\u0001\u0000\u0000\u0000H\u01c1\u0001\u0000\u0000\u0000"+ + "J\u01c5\u0001\u0000\u0000\u0000L\u01c9\u0001\u0000\u0000\u0000N\u01cc"+ + "\u0001\u0000\u0000\u0000P\u01d1\u0001\u0000\u0000\u0000R\u01d5\u0001\u0000"+ + "\u0000\u0000T\u01d7\u0001\u0000\u0000\u0000V\u01d9\u0001\u0000\u0000\u0000"+ + "X\u01dc\u0001\u0000\u0000\u0000Z\u01e4\u0001\u0000\u0000\u0000\\\u01e6"+ + "\u0001\u0000\u0000\u0000^\u0200\u0001\u0000\u0000\u0000`\u0204\u0001\u0000"+ + "\u0000\u0000bc\u0003\u0002\u0001\u0000cd\u0005\u0000\u0000\u0001d\u0001"+ + "\u0001\u0000\u0000\u0000ef\u0006\u0001\uffff\uffff\u0000fg\u0003\u0004"+ + "\u0002\u0000gm\u0001\u0000\u0000\u0000hi\n\u0001\u0000\u0000ij\u0005\u001a"+ + "\u0000\u0000jl\u0003\u0006\u0003\u0000kh\u0001\u0000\u0000\u0000lo\u0001"+ + "\u0000\u0000\u0000mk\u0001\u0000\u0000\u0000mn\u0001\u0000\u0000\u0000"+ + "n\u0003\u0001\u0000\u0000\u0000om\u0001\u0000\u0000\u0000pu\u0003V+\u0000"+ + "qu\u0003\u001c\u000e\u0000ru\u0003\u0016\u000b\u0000su\u0003Z-\u0000t"+ + "p\u0001\u0000\u0000\u0000tq\u0001\u0000\u0000\u0000tr\u0001\u0000\u0000"+ + "\u0000ts\u0001\u0000\u0000\u0000u\u0005\u0001\u0000\u0000\u0000v\u0084"+ + "\u0003 \u0010\u0000w\u0084\u0003$\u0012\u0000x\u0084\u00032\u0019\u0000"+ + "y\u0084\u00038\u001c\u0000z\u0084\u00034\u001a\u0000{\u0084\u0003\"\u0011"+ + "\u0000|\u0084\u0003\b\u0004\u0000}\u0084\u0003:\u001d\u0000~\u0084\u0003"+ + "<\u001e\u0000\u007f\u0084\u0003@ \u0000\u0080\u0084\u0003B!\u0000\u0081"+ + "\u0084\u0003\\.\u0000\u0082\u0084\u0003D\"\u0000\u0083v\u0001\u0000\u0000"+ + "\u0000\u0083w\u0001\u0000\u0000\u0000\u0083x\u0001\u0000\u0000\u0000\u0083"+ + "y\u0001\u0000\u0000\u0000\u0083z\u0001\u0000\u0000\u0000\u0083{\u0001"+ + "\u0000\u0000\u0000\u0083|\u0001\u0000\u0000\u0000\u0083}\u0001\u0000\u0000"+ + "\u0000\u0083~\u0001\u0000\u0000\u0000\u0083\u007f\u0001\u0000\u0000\u0000"+ + "\u0083\u0080\u0001\u0000\u0000\u0000\u0083\u0081\u0001\u0000\u0000\u0000"+ + "\u0083\u0082\u0001\u0000\u0000\u0000\u0084\u0007\u0001\u0000\u0000\u0000"+ + "\u0085\u0086\u0005\u0012\u0000\u0000\u0086\u0087\u0003\n\u0005\u0000\u0087"+ + "\t\u0001\u0000\u0000\u0000\u0088\u0089\u0006\u0005\uffff\uffff\u0000\u0089"+ + "\u008a\u0005,\u0000\u0000\u008a\u00a5\u0003\n\u0005\u0007\u008b\u00a5"+ + "\u0003\u000e\u0007\u0000\u008c\u00a5\u0003\f\u0006\u0000\u008d\u008f\u0003"+ + "\u000e\u0007\u0000\u008e\u0090\u0005,\u0000\u0000\u008f\u008e\u0001\u0000"+ + "\u0000\u0000\u008f\u0090\u0001\u0000\u0000\u0000\u0090\u0091\u0001\u0000"+ + "\u0000\u0000\u0091\u0092\u0005)\u0000\u0000\u0092\u0093\u0005(\u0000\u0000"+ + "\u0093\u0098\u0003\u000e\u0007\u0000\u0094\u0095\u0005\"\u0000\u0000\u0095"+ + "\u0097\u0003\u000e\u0007\u0000\u0096\u0094\u0001\u0000\u0000\u0000\u0097"+ + "\u009a\u0001\u0000\u0000\u0000\u0098\u0096\u0001\u0000\u0000\u0000\u0098"+ + "\u0099\u0001\u0000\u0000\u0000\u0099\u009b\u0001\u0000\u0000\u0000\u009a"+ + "\u0098\u0001\u0000\u0000\u0000\u009b\u009c\u00052\u0000\u0000\u009c\u00a5"+ + 
"\u0001\u0000\u0000\u0000\u009d\u009e\u0003\u000e\u0007\u0000\u009e\u00a0"+ + "\u0005*\u0000\u0000\u009f\u00a1\u0005,\u0000\u0000\u00a0\u009f\u0001\u0000"+ + "\u0000\u0000\u00a0\u00a1\u0001\u0000\u0000\u0000\u00a1\u00a2\u0001\u0000"+ + "\u0000\u0000\u00a2\u00a3\u0005-\u0000\u0000\u00a3\u00a5\u0001\u0000\u0000"+ + "\u0000\u00a4\u0088\u0001\u0000\u0000\u0000\u00a4\u008b\u0001\u0000\u0000"+ + "\u0000\u00a4\u008c\u0001\u0000\u0000\u0000\u00a4\u008d\u0001\u0000\u0000"+ + "\u0000\u00a4\u009d\u0001\u0000\u0000\u0000\u00a5\u00ae\u0001\u0000\u0000"+ + "\u0000\u00a6\u00a7\n\u0004\u0000\u0000\u00a7\u00a8\u0005\u001f\u0000\u0000"+ + "\u00a8\u00ad\u0003\n\u0005\u0005\u00a9\u00aa\n\u0003\u0000\u0000\u00aa"+ + "\u00ab\u0005/\u0000\u0000\u00ab\u00ad\u0003\n\u0005\u0004\u00ac\u00a6"+ + "\u0001\u0000\u0000\u0000\u00ac\u00a9\u0001\u0000\u0000\u0000\u00ad\u00b0"+ + "\u0001\u0000\u0000\u0000\u00ae\u00ac\u0001\u0000\u0000\u0000\u00ae\u00af"+ + "\u0001\u0000\u0000\u0000\u00af\u000b\u0001\u0000\u0000\u0000\u00b0\u00ae"+ + "\u0001\u0000\u0000\u0000\u00b1\u00b3\u0003\u000e\u0007\u0000\u00b2\u00b4"+ + "\u0005,\u0000\u0000\u00b3\u00b2\u0001\u0000\u0000\u0000\u00b3\u00b4\u0001"+ + "\u0000\u0000\u0000\u00b4\u00b5\u0001\u0000\u0000\u0000\u00b5\u00b6\u0005"+ + "+\u0000\u0000\u00b6\u00b7\u0003R)\u0000\u00b7\u00c0\u0001\u0000\u0000"+ + "\u0000\u00b8\u00ba\u0003\u000e\u0007\u0000\u00b9\u00bb\u0005,\u0000\u0000"+ + "\u00ba\u00b9\u0001\u0000\u0000\u0000\u00ba\u00bb\u0001\u0000\u0000\u0000"+ + "\u00bb\u00bc\u0001\u0000\u0000\u0000\u00bc\u00bd\u00051\u0000\u0000\u00bd"+ + "\u00be\u0003R)\u0000\u00be\u00c0\u0001\u0000\u0000\u0000\u00bf\u00b1\u0001"+ + "\u0000\u0000\u0000\u00bf\u00b8\u0001\u0000\u0000\u0000\u00c0\r\u0001\u0000"+ + "\u0000\u0000\u00c1\u00c7\u0003\u0010\b\u0000\u00c2\u00c3\u0003\u0010\b"+ + "\u0000\u00c3\u00c4\u0003T*\u0000\u00c4\u00c5\u0003\u0010\b\u0000\u00c5"+ + "\u00c7\u0001\u0000\u0000\u0000\u00c6\u00c1\u0001\u0000\u0000\u0000\u00c6"+ + "\u00c2\u0001\u0000\u0000\u0000\u00c7\u000f\u0001\u0000\u0000\u0000\u00c8"+ + "\u00c9\u0006\b\uffff\uffff\u0000\u00c9\u00cd\u0003\u0012\t\u0000\u00ca"+ + "\u00cb\u0007\u0000\u0000\u0000\u00cb\u00cd\u0003\u0010\b\u0003\u00cc\u00c8"+ + "\u0001\u0000\u0000\u0000\u00cc\u00ca\u0001\u0000\u0000\u0000\u00cd\u00d6"+ + "\u0001\u0000\u0000\u0000\u00ce\u00cf\n\u0002\u0000\u0000\u00cf\u00d0\u0007"+ + "\u0001\u0000\u0000\u00d0\u00d5\u0003\u0010\b\u0003\u00d1\u00d2\n\u0001"+ + "\u0000\u0000\u00d2\u00d3\u0007\u0000\u0000\u0000\u00d3\u00d5\u0003\u0010"+ + "\b\u0002\u00d4\u00ce\u0001\u0000\u0000\u0000\u00d4\u00d1\u0001\u0000\u0000"+ + "\u0000\u00d5\u00d8\u0001\u0000\u0000\u0000\u00d6\u00d4\u0001\u0000\u0000"+ + "\u0000\u00d6\u00d7\u0001\u0000\u0000\u0000\u00d7\u0011\u0001\u0000\u0000"+ + "\u0000\u00d8\u00d6\u0001\u0000\u0000\u0000\u00d9\u00e1\u00030\u0018\u0000"+ + "\u00da\u00e1\u0003(\u0014\u0000\u00db\u00e1\u0003\u0014\n\u0000\u00dc"+ + "\u00dd\u0005(\u0000\u0000\u00dd\u00de\u0003\n\u0005\u0000\u00de\u00df"+ + "\u00052\u0000\u0000\u00df\u00e1\u0001\u0000\u0000\u0000\u00e0\u00d9\u0001"+ + "\u0000\u0000\u0000\u00e0\u00da\u0001\u0000\u0000\u0000\u00e0\u00db\u0001"+ + "\u0000\u0000\u0000\u00e0\u00dc\u0001\u0000\u0000\u0000\u00e1\u0013\u0001"+ + "\u0000\u0000\u0000\u00e2\u00e3\u0003,\u0016\u0000\u00e3\u00ed\u0005(\u0000"+ + "\u0000\u00e4\u00ee\u0005<\u0000\u0000\u00e5\u00ea\u0003\n\u0005\u0000"+ + "\u00e6\u00e7\u0005\"\u0000\u0000\u00e7\u00e9\u0003\n\u0005\u0000\u00e8"+ + "\u00e6\u0001\u0000\u0000\u0000\u00e9\u00ec\u0001\u0000\u0000\u0000\u00ea"+ + 
"\u00e8\u0001\u0000\u0000\u0000\u00ea\u00eb\u0001\u0000\u0000\u0000\u00eb"+ + "\u00ee\u0001\u0000\u0000\u0000\u00ec\u00ea\u0001\u0000\u0000\u0000\u00ed"+ + "\u00e4\u0001\u0000\u0000\u0000\u00ed\u00e5\u0001\u0000\u0000\u0000\u00ed"+ + "\u00ee\u0001\u0000\u0000\u0000\u00ee\u00ef\u0001\u0000\u0000\u0000\u00ef"+ + "\u00f0\u00052\u0000\u0000\u00f0\u0015\u0001\u0000\u0000\u0000\u00f1\u00f2"+ + "\u0005\u000e\u0000\u0000\u00f2\u00f3\u0003\u0018\f\u0000\u00f3\u0017\u0001"+ + "\u0000\u0000\u0000\u00f4\u00f9\u0003\u001a\r\u0000\u00f5\u00f6\u0005\""+ + "\u0000\u0000\u00f6\u00f8\u0003\u001a\r\u0000\u00f7\u00f5\u0001\u0000\u0000"+ + "\u0000\u00f8\u00fb\u0001\u0000\u0000\u0000\u00f9\u00f7\u0001\u0000\u0000"+ + "\u0000\u00f9\u00fa\u0001\u0000\u0000\u0000\u00fa\u0019\u0001\u0000\u0000"+ + "\u0000\u00fb\u00f9\u0001\u0000\u0000\u0000\u00fc\u0102\u0003\n\u0005\u0000"+ + "\u00fd\u00fe\u0003(\u0014\u0000\u00fe\u00ff\u0005!\u0000\u0000\u00ff\u0100"+ + "\u0003\n\u0005\u0000\u0100\u0102\u0001\u0000\u0000\u0000\u0101\u00fc\u0001"+ + "\u0000\u0000\u0000\u0101\u00fd\u0001\u0000\u0000\u0000\u0102\u001b\u0001"+ + "\u0000\u0000\u0000\u0103\u0104\u0005\u0006\u0000\u0000\u0104\u0109\u0003"+ + "&\u0013\u0000\u0105\u0106\u0005\"\u0000\u0000\u0106\u0108\u0003&\u0013"+ + "\u0000\u0107\u0105\u0001\u0000\u0000\u0000\u0108\u010b\u0001\u0000\u0000"+ + "\u0000\u0109\u0107\u0001\u0000\u0000\u0000\u0109\u010a\u0001\u0000\u0000"+ + "\u0000\u010a\u010d\u0001\u0000\u0000\u0000\u010b\u0109\u0001\u0000\u0000"+ + "\u0000\u010c\u010e\u0003\u001e\u000f\u0000\u010d\u010c\u0001\u0000\u0000"+ + "\u0000\u010d\u010e\u0001\u0000\u0000\u0000\u010e\u001d\u0001\u0000\u0000"+ + "\u0000\u010f\u0110\u0005?\u0000\u0000\u0110\u0111\u0005F\u0000\u0000\u0111"+ + "\u0116\u0003&\u0013\u0000\u0112\u0113\u0005\"\u0000\u0000\u0113\u0115"+ + "\u0003&\u0013\u0000\u0114\u0112\u0001\u0000\u0000\u0000\u0115\u0118\u0001"+ + "\u0000\u0000\u0000\u0116\u0114\u0001\u0000\u0000\u0000\u0116\u0117\u0001"+ + "\u0000\u0000\u0000\u0117\u0119\u0001\u0000\u0000\u0000\u0118\u0116\u0001"+ + "\u0000\u0000\u0000\u0119\u011a\u0005@\u0000\u0000\u011a\u001f\u0001\u0000"+ + "\u0000\u0000\u011b\u011c\u0005\u0004\u0000\u0000\u011c\u011d\u0003\u0018"+ + "\f\u0000\u011d!\u0001\u0000\u0000\u0000\u011e\u0120\u0005\u0011\u0000"+ + "\u0000\u011f\u0121\u0003\u0018\f\u0000\u0120\u011f\u0001\u0000\u0000\u0000"+ + "\u0120\u0121\u0001\u0000\u0000\u0000\u0121\u0124\u0001\u0000\u0000\u0000"+ + "\u0122\u0123\u0005\u001e\u0000\u0000\u0123\u0125\u0003\u0018\f\u0000\u0124"+ + "\u0122\u0001\u0000\u0000\u0000\u0124\u0125\u0001\u0000\u0000\u0000\u0125"+ + "#\u0001\u0000\u0000\u0000\u0126\u0127\u0005\b\u0000\u0000\u0127\u012a"+ + "\u0003\u0018\f\u0000\u0128\u0129\u0005\u001e\u0000\u0000\u0129\u012b\u0003"+ + "\u0018\f\u0000\u012a\u0128\u0001\u0000\u0000\u0000\u012a\u012b\u0001\u0000"+ + "\u0000\u0000\u012b%\u0001\u0000\u0000\u0000\u012c\u012d\u0007\u0002\u0000"+ + "\u0000\u012d\'\u0001\u0000\u0000\u0000\u012e\u0133\u0003,\u0016\u0000"+ + "\u012f\u0130\u0005$\u0000\u0000\u0130\u0132\u0003,\u0016\u0000\u0131\u012f"+ + "\u0001\u0000\u0000\u0000\u0132\u0135\u0001\u0000\u0000\u0000\u0133\u0131"+ + "\u0001\u0000\u0000\u0000\u0133\u0134\u0001\u0000\u0000\u0000\u0134)\u0001"+ + "\u0000\u0000\u0000\u0135\u0133\u0001\u0000\u0000\u0000\u0136\u013b\u0003"+ + ".\u0017\u0000\u0137\u0138\u0005$\u0000\u0000\u0138\u013a\u0003.\u0017"+ + "\u0000\u0139\u0137\u0001\u0000\u0000\u0000\u013a\u013d\u0001\u0000\u0000"+ + "\u0000\u013b\u0139\u0001\u0000\u0000\u0000\u013b\u013c\u0001\u0000\u0000"+ + 
"\u0000\u013c+\u0001\u0000\u0000\u0000\u013d\u013b\u0001\u0000\u0000\u0000"+ + "\u013e\u013f\u0007\u0003\u0000\u0000\u013f-\u0001\u0000\u0000\u0000\u0140"+ + "\u0141\u0007\u0004\u0000\u0000\u0141/\u0001\u0000\u0000\u0000\u0142\u016d"+ + "\u0005-\u0000\u0000\u0143\u0144\u0003P(\u0000\u0144\u0145\u0005A\u0000"+ + "\u0000\u0145\u016d\u0001\u0000\u0000\u0000\u0146\u016d\u0003N\'\u0000"+ + "\u0147\u016d\u0003P(\u0000\u0148\u016d\u0003J%\u0000\u0149\u016d\u0005"+ + "0\u0000\u0000\u014a\u016d\u0003R)\u0000\u014b\u014c\u0005?\u0000\u0000"+ + "\u014c\u0151\u0003L&\u0000\u014d\u014e\u0005\"\u0000\u0000\u014e\u0150"+ + "\u0003L&\u0000\u014f\u014d\u0001\u0000\u0000\u0000\u0150\u0153\u0001\u0000"+ + "\u0000\u0000\u0151\u014f\u0001\u0000\u0000\u0000\u0151\u0152\u0001\u0000"+ + "\u0000\u0000\u0152\u0154\u0001\u0000\u0000\u0000\u0153\u0151\u0001\u0000"+ + "\u0000\u0000\u0154\u0155\u0005@\u0000\u0000\u0155\u016d\u0001\u0000\u0000"+ + "\u0000\u0156\u0157\u0005?\u0000\u0000\u0157\u015c\u0003J%\u0000\u0158"+ + "\u0159\u0005\"\u0000\u0000\u0159\u015b\u0003J%\u0000\u015a\u0158\u0001"+ + "\u0000\u0000\u0000\u015b\u015e\u0001\u0000\u0000\u0000\u015c\u015a\u0001"+ + "\u0000\u0000\u0000\u015c\u015d\u0001\u0000\u0000\u0000\u015d\u015f\u0001"+ + "\u0000\u0000\u0000\u015e\u015c\u0001\u0000\u0000\u0000\u015f\u0160\u0005"+ + "@\u0000\u0000\u0160\u016d\u0001\u0000\u0000\u0000\u0161\u0162\u0005?\u0000"+ + "\u0000\u0162\u0167\u0003R)\u0000\u0163\u0164\u0005\"\u0000\u0000\u0164"+ + "\u0166\u0003R)\u0000\u0165\u0163\u0001\u0000\u0000\u0000\u0166\u0169\u0001"+ + "\u0000\u0000\u0000\u0167\u0165\u0001\u0000\u0000\u0000\u0167\u0168\u0001"+ + "\u0000\u0000\u0000\u0168\u016a\u0001\u0000\u0000\u0000\u0169\u0167\u0001"+ + "\u0000\u0000\u0000\u016a\u016b\u0005@\u0000\u0000\u016b\u016d\u0001\u0000"+ + "\u0000\u0000\u016c\u0142\u0001\u0000\u0000\u0000\u016c\u0143\u0001\u0000"+ + "\u0000\u0000\u016c\u0146\u0001\u0000\u0000\u0000\u016c\u0147\u0001\u0000"+ + "\u0000\u0000\u016c\u0148\u0001\u0000\u0000\u0000\u016c\u0149\u0001\u0000"+ + "\u0000\u0000\u016c\u014a\u0001\u0000\u0000\u0000\u016c\u014b\u0001\u0000"+ + "\u0000\u0000\u016c\u0156\u0001\u0000\u0000\u0000\u016c\u0161\u0001\u0000"+ + "\u0000\u0000\u016d1\u0001\u0000\u0000\u0000\u016e\u016f\u0005\n\u0000"+ + "\u0000\u016f\u0170\u0005\u001c\u0000\u0000\u01703\u0001\u0000\u0000\u0000"+ + "\u0171\u0172\u0005\u0010\u0000\u0000\u0172\u0177\u00036\u001b\u0000\u0173"+ + "\u0174\u0005\"\u0000\u0000\u0174\u0176\u00036\u001b\u0000\u0175\u0173"+ + "\u0001\u0000\u0000\u0000\u0176\u0179\u0001\u0000\u0000\u0000\u0177\u0175"+ + "\u0001\u0000\u0000\u0000\u0177\u0178\u0001\u0000\u0000\u0000\u01785\u0001"+ + "\u0000\u0000\u0000\u0179\u0177\u0001\u0000\u0000\u0000\u017a\u017c\u0003"+ + "\n\u0005\u0000\u017b\u017d\u0007\u0005\u0000\u0000\u017c\u017b\u0001\u0000"+ + "\u0000\u0000\u017c\u017d\u0001\u0000\u0000\u0000\u017d\u0180\u0001\u0000"+ + "\u0000\u0000\u017e\u017f\u0005.\u0000\u0000\u017f\u0181\u0007\u0006\u0000"+ + "\u0000\u0180\u017e\u0001\u0000\u0000\u0000\u0180\u0181\u0001\u0000\u0000"+ + "\u0000\u01817\u0001\u0000\u0000\u0000\u0182\u0183\u0005\t\u0000\u0000"+ + "\u0183\u0188\u0003*\u0015\u0000\u0184\u0185\u0005\"\u0000\u0000\u0185"+ + "\u0187\u0003*\u0015\u0000\u0186\u0184\u0001\u0000\u0000\u0000\u0187\u018a"+ + "\u0001\u0000\u0000\u0000\u0188\u0186\u0001\u0000\u0000\u0000\u0188\u0189"+ + "\u0001\u0000\u0000\u0000\u0189\u0195\u0001\u0000\u0000\u0000\u018a\u0188"+ + "\u0001\u0000\u0000\u0000\u018b\u018c\u0005\f\u0000\u0000\u018c\u0191\u0003"+ + 
"*\u0015\u0000\u018d\u018e\u0005\"\u0000\u0000\u018e\u0190\u0003*\u0015"+ + "\u0000\u018f\u018d\u0001\u0000\u0000\u0000\u0190\u0193\u0001\u0000\u0000"+ + "\u0000\u0191\u018f\u0001\u0000\u0000\u0000\u0191\u0192\u0001\u0000\u0000"+ + "\u0000\u0192\u0195\u0001\u0000\u0000\u0000\u0193\u0191\u0001\u0000\u0000"+ + "\u0000\u0194\u0182\u0001\u0000\u0000\u0000\u0194\u018b\u0001\u0000\u0000"+ + "\u0000\u01959\u0001\u0000\u0000\u0000\u0196\u0197\u0005\u0002\u0000\u0000"+ + "\u0197\u019c\u0003*\u0015\u0000\u0198\u0199\u0005\"\u0000\u0000\u0199"+ + "\u019b\u0003*\u0015\u0000\u019a\u0198\u0001\u0000\u0000\u0000\u019b\u019e"+ + "\u0001\u0000\u0000\u0000\u019c\u019a\u0001\u0000\u0000\u0000\u019c\u019d"+ + "\u0001\u0000\u0000\u0000\u019d;\u0001\u0000\u0000\u0000\u019e\u019c\u0001"+ + "\u0000\u0000\u0000\u019f\u01a0\u0005\r\u0000\u0000\u01a0\u01a5\u0003>"+ + "\u001f\u0000\u01a1\u01a2\u0005\"\u0000\u0000\u01a2\u01a4\u0003>\u001f"+ + "\u0000\u01a3\u01a1\u0001\u0000\u0000\u0000\u01a4\u01a7\u0001\u0000\u0000"+ + "\u0000\u01a5\u01a3\u0001\u0000\u0000\u0000\u01a5\u01a6\u0001\u0000\u0000"+ + "\u0000\u01a6=\u0001\u0000\u0000\u0000\u01a7\u01a5\u0001\u0000\u0000\u0000"+ + "\u01a8\u01a9\u0003*\u0015\u0000\u01a9\u01aa\u0005O\u0000\u0000\u01aa\u01ab"+ + "\u0003*\u0015\u0000\u01ab?\u0001\u0000\u0000\u0000\u01ac\u01ad\u0005\u0001"+ + "\u0000\u0000\u01ad\u01ae\u0003\u0012\t\u0000\u01ae\u01b0\u0003R)\u0000"+ + "\u01af\u01b1\u0003F#\u0000\u01b0\u01af\u0001\u0000\u0000\u0000\u01b0\u01b1"+ + "\u0001\u0000\u0000\u0000\u01b1A\u0001\u0000\u0000\u0000\u01b2\u01b3\u0005"+ + "\u0007\u0000\u0000\u01b3\u01b4\u0003\u0012\t\u0000\u01b4\u01b5\u0003R"+ + ")\u0000\u01b5C\u0001\u0000\u0000\u0000\u01b6\u01b7\u0005\u000b\u0000\u0000"+ + "\u01b7\u01b8\u0003(\u0014\u0000\u01b8E\u0001\u0000\u0000\u0000\u01b9\u01be"+ + "\u0003H$\u0000\u01ba\u01bb\u0005\"\u0000\u0000\u01bb\u01bd\u0003H$\u0000"+ + "\u01bc\u01ba\u0001\u0000\u0000\u0000\u01bd\u01c0\u0001\u0000\u0000\u0000"+ + "\u01be\u01bc\u0001\u0000\u0000\u0000\u01be\u01bf\u0001\u0000\u0000\u0000"+ + "\u01bfG\u0001\u0000\u0000\u0000\u01c0\u01be\u0001\u0000\u0000\u0000\u01c1"+ + "\u01c2\u0003,\u0016\u0000\u01c2\u01c3\u0005!\u0000\u0000\u01c3\u01c4\u0003"+ + "0\u0018\u0000\u01c4I\u0001\u0000\u0000\u0000\u01c5\u01c6\u0007\u0007\u0000"+ + "\u0000\u01c6K\u0001\u0000\u0000\u0000\u01c7\u01ca\u0003N\'\u0000\u01c8"+ + "\u01ca\u0003P(\u0000\u01c9\u01c7\u0001\u0000\u0000\u0000\u01c9\u01c8\u0001"+ + "\u0000\u0000\u0000\u01caM\u0001\u0000\u0000\u0000\u01cb\u01cd\u0007\u0000"+ + "\u0000\u0000\u01cc\u01cb\u0001\u0000\u0000\u0000\u01cc\u01cd\u0001\u0000"+ + "\u0000\u0000\u01cd\u01ce\u0001\u0000\u0000\u0000\u01ce\u01cf\u0005\u001d"+ + "\u0000\u0000\u01cfO\u0001\u0000\u0000\u0000\u01d0\u01d2\u0007\u0000\u0000"+ + "\u0000\u01d1\u01d0\u0001\u0000\u0000\u0000\u01d1\u01d2\u0001\u0000\u0000"+ + "\u0000\u01d2\u01d3\u0001\u0000\u0000\u0000\u01d3\u01d4\u0005\u001c\u0000"+ + "\u0000\u01d4Q\u0001\u0000\u0000\u0000\u01d5\u01d6\u0005\u001b\u0000\u0000"+ + "\u01d6S\u0001\u0000\u0000\u0000\u01d7\u01d8\u0007\b\u0000\u0000\u01d8"+ + "U\u0001\u0000\u0000\u0000\u01d9\u01da\u0005\u0005\u0000\u0000\u01da\u01db"+ + "\u0003X,\u0000\u01dbW\u0001\u0000\u0000\u0000\u01dc\u01dd\u0005?\u0000"+ + "\u0000\u01dd\u01de\u0003\u0002\u0001\u0000\u01de\u01df\u0005@\u0000\u0000"+ + "\u01dfY\u0001\u0000\u0000\u0000\u01e0\u01e1\u0005\u000f\u0000\u0000\u01e1"+ + "\u01e5\u0005_\u0000\u0000\u01e2\u01e3\u0005\u000f\u0000\u0000\u01e3\u01e5"+ + "\u0005`\u0000\u0000\u01e4\u01e0\u0001\u0000\u0000\u0000\u01e4\u01e2\u0001"+ + 
"\u0000\u0000\u0000\u01e5[\u0001\u0000\u0000\u0000\u01e6\u01ea\u0005\u0003"+ + "\u0000\u0000\u01e7\u01e9\u0003`0\u0000\u01e8\u01e7\u0001\u0000\u0000\u0000"+ + "\u01e9\u01ec\u0001\u0000\u0000\u0000\u01ea\u01e8\u0001\u0000\u0000\u0000"+ + "\u01ea\u01eb\u0001\u0000\u0000\u0000\u01eb\u01ed\u0001\u0000\u0000\u0000"+ + "\u01ec\u01ea\u0001\u0000\u0000\u0000\u01ed\u01f0\u0005U\u0000\u0000\u01ee"+ + "\u01ef\u0005S\u0000\u0000\u01ef\u01f1\u0003*\u0015\u0000\u01f0\u01ee\u0001"+ + "\u0000\u0000\u0000\u01f0\u01f1\u0001\u0000\u0000\u0000\u01f1\u01fb\u0001"+ + "\u0000\u0000\u0000\u01f2\u01f3\u0005T\u0000\u0000\u01f3\u01f8\u0003^/"+ + "\u0000\u01f4\u01f5\u0005\"\u0000\u0000\u01f5\u01f7\u0003^/\u0000\u01f6"+ + "\u01f4\u0001\u0000\u0000\u0000\u01f7\u01fa\u0001\u0000\u0000\u0000\u01f8"+ + "\u01f6\u0001\u0000\u0000\u0000\u01f8\u01f9\u0001\u0000\u0000\u0000\u01f9"+ + "\u01fc\u0001\u0000\u0000\u0000\u01fa\u01f8\u0001\u0000\u0000\u0000\u01fb"+ + "\u01f2\u0001\u0000\u0000\u0000\u01fb\u01fc\u0001\u0000\u0000\u0000\u01fc"+ + "]\u0001\u0000\u0000\u0000\u01fd\u01fe\u0003*\u0015\u0000\u01fe\u01ff\u0005"+ + "!\u0000\u0000\u01ff\u0201\u0001\u0000\u0000\u0000\u0200\u01fd\u0001\u0000"+ + "\u0000\u0000\u0200\u0201\u0001\u0000\u0000\u0000\u0201\u0202\u0001\u0000"+ + "\u0000\u0000\u0202\u0203\u0003*\u0015\u0000\u0203_\u0001\u0000\u0000\u0000"+ + "\u0204\u0205\u0005?\u0000\u0000\u0205\u0206\u0005e\u0000\u0000\u0206\u0207"+ + "\u0005d\u0000\u0000\u0207\u0208\u0005e\u0000\u0000\u0208\u0209\u0005@"+ + "\u0000\u0000\u0209a\u0001\u0000\u0000\u00004mt\u0083\u008f\u0098\u00a0"+ + "\u00a4\u00ac\u00ae\u00b3\u00ba\u00bf\u00c6\u00cc\u00d4\u00d6\u00e0\u00ea"+ + "\u00ed\u00f9\u0101\u0109\u010d\u0116\u0120\u0124\u012a\u0133\u013b\u0151"+ + "\u015c\u0167\u016c\u0177\u017c\u0180\u0188\u0191\u0194\u019c\u01a5\u01b0"+ + "\u01be\u01c9\u01cc\u01d1\u01e4\u01ea\u01f0\u01f8\u01fb\u0200"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseListener.java index c8b86b75d6c16..40946a2236d2f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseListener.java @@ -384,18 +384,6 @@ public class EsqlBaseParserBaseListener implements EsqlBaseParserListener { *

<p>The default implementation does nothing.</p> */ @Override public void exitInlinestatsCommand(EsqlBaseParser.InlinestatsCommandContext ctx) { } - /** - * {@inheritDoc} - * - * <p>The default implementation does nothing.</p> - */ - @Override public void enterGrouping(EsqlBaseParser.GroupingContext ctx) { } - /** - * {@inheritDoc} - * - * <p>The default implementation does nothing.</p> - */ - @Override public void exitGrouping(EsqlBaseParser.GroupingContext ctx) { } /** * {@inheritDoc} * diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java index 1b825dd9c212f..43c30c0a063cf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java @@ -229,13 +229,6 @@ public class EsqlBaseParserBaseVisitor<T> extends AbstractParseTreeVisitor<T> im * {@link #visitChildren} on {@code ctx}.</p> */ @Override public T visitInlinestatsCommand(EsqlBaseParser.InlinestatsCommandContext ctx) { return visitChildren(ctx); } - /** - * {@inheritDoc} - * - * <p>The default implementation returns the result of calling - * {@link #visitChildren} on {@code ctx}.</p> - */ - @Override public T visitGrouping(EsqlBaseParser.GroupingContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java index 89c2e39b65f8d..712227ab36787 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java @@ -351,16 +351,6 @@ public interface EsqlBaseParserListener extends ParseTreeListener { * @param ctx the parse tree */ void exitInlinestatsCommand(EsqlBaseParser.InlinestatsCommandContext ctx); - /** - * Enter a parse tree produced by {@link EsqlBaseParser#grouping}. - * @param ctx the parse tree - */ - void enterGrouping(EsqlBaseParser.GroupingContext ctx); - /** - * Exit a parse tree produced by {@link EsqlBaseParser#grouping}. - * @param ctx the parse tree - */ - void exitGrouping(EsqlBaseParser.GroupingContext ctx); /** * Enter a parse tree produced by {@link EsqlBaseParser#fromIdentifier}. * @param ctx the parse tree diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java index 0fc4fecc4a2df..d5c871641f3b7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java @@ -213,12 +213,6 @@ public interface EsqlBaseParserVisitor<T> extends ParseTreeVisitor<T> { * @return the visitor result */ T visitInlinestatsCommand(EsqlBaseParser.InlinestatsCommandContext ctx); - /** - * Visit a parse tree produced by {@link EsqlBaseParser#grouping}. - * @param ctx the parse tree - * @return the visitor result - */ - T visitGrouping(EsqlBaseParser.GroupingContext ctx); /** * Visit a parse tree produced by {@link EsqlBaseParser#fromIdentifier}.
* @param ctx the parse tree diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java index 9875979808f0b..9c9e7d8bd0066 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.expression.Alias; +import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.expression.NamedExpression; @@ -58,12 +59,12 @@ import java.time.Duration; import java.time.ZoneId; import java.time.temporal.TemporalAmount; +import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.function.BiFunction; -import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.parseTemporalAmout; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.DATE_PERIOD; @@ -409,13 +410,50 @@ private NamedExpression enrichFieldName(EsqlBaseParser.QualifiedNamePatternConte public Alias visitField(EsqlBaseParser.FieldContext ctx) { UnresolvedAttribute id = visitQualifiedName(ctx.qualifiedName()); Expression value = expression(ctx.booleanExpression()); - String name = id == null ? ctx.getText() : id.qualifiedName(); - return new Alias(source(ctx), name, value); + var source = source(ctx); + String name = id == null ? source.text() : id.qualifiedName(); + return new Alias(source, name, value); } @Override - public List<NamedExpression> visitGrouping(EsqlBaseParser.GroupingContext ctx) { - return ctx != null ? visitList(this, ctx.qualifiedName(), NamedExpression.class) : emptyList(); + public List<Alias> visitFields(EsqlBaseParser.FieldsContext ctx) { + return ctx != null ? visitList(this, ctx.field(), Alias.class) : new ArrayList<>(); + } + + /** + * Similar to {@link #visitFields(EsqlBaseParser.FieldsContext)}, however it avoids wrapping the expression + * into an Alias.
+ */ + public List<NamedExpression> visitGrouping(EsqlBaseParser.FieldsContext ctx) { + List<NamedExpression> list; + if (ctx != null) { + var fields = ctx.field(); + list = new ArrayList<>(fields.size()); + for (EsqlBaseParser.FieldContext field : fields) { + NamedExpression ne = null; + UnresolvedAttribute id = visitQualifiedName(field.qualifiedName()); + Expression value = expression(field.booleanExpression()); + String name = null; + if (id == null) { + // when no alias has been specified, see if the underlying one can be reused + if (value instanceof Attribute a) { + ne = a; + } else { + name = source(field).text(); + } + } else { + name = id.qualifiedName(); + } + // wrap when necessary - no alias and no underlying attribute + if (ne == null) { + ne = new Alias(source(ctx), name, value); + } + list.add(ne); + } + } else { + list = new ArrayList<>(); + } + return list; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index 5e90f6e8e44c9..b722b91225c8b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -53,6 +53,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -197,14 +198,14 @@ public LogicalPlan visitFromCommand(EsqlBaseParser.FromCommandContext ctx) { @Override public PlanFactory visitStatsCommand(EsqlBaseParser.StatsCommandContext ctx) { - List<NamedExpression> aggregates = new ArrayList<>(visitFields(ctx.fields())); - List<NamedExpression> groupings = visitGrouping(ctx.grouping()); + List<NamedExpression> aggregates = new ArrayList<>(visitFields(ctx.stats)); + List<NamedExpression> groupings = visitGrouping(ctx.grouping); if (aggregates.isEmpty() && groupings.isEmpty()) { throw new ParsingException(source(ctx), "At least one aggregation or grouping expression required in [{}]", ctx.getText()); } // grouping keys are automatically added as aggregations however the user is not allowed to specify them if (groupings.isEmpty() == false && aggregates.isEmpty() == false) { - var groupNames = Expressions.names(groupings); + var groupNames = new LinkedHashSet<>(Expressions.names(Expressions.references(groupings))); for (NamedExpression aggregate : aggregates) { if (aggregate instanceof Alias a && a.child() instanceof UnresolvedAttribute ua && groupNames.contains(ua.name())) { @@ -218,8 +219,8 @@ public PlanFactory visitStatsCommand(EsqlBaseParser.StatsCommandContext ctx) { @Override public PlanFactory visitInlinestatsCommand(EsqlBaseParser.InlinestatsCommandContext ctx) { - List<NamedExpression> aggregates = new ArrayList<>(visitFields(ctx.fields())); - List<NamedExpression> groupings = visitGrouping(ctx.grouping()); + List<NamedExpression> aggregates = new ArrayList<>(visitFields(ctx.stats)); + List<NamedExpression> groupings = visitGrouping(ctx.grouping); aggregates.addAll(groupings); return input -> new InlineStats(source(ctx), input, new ArrayList<>(groupings), aggregates); } @@ -230,11 +231,6 @@ public PlanFactory visitWhereCommand(EsqlBaseParser.WhereCommandContext ctx) { return input -> new Filter(source(ctx), input, expression); } - @Override - public List<Alias> visitFields(EsqlBaseParser.FieldsContext ctx) { - return ctx != null ?
visitList(this, ctx.field(), Alias.class) : new ArrayList<>(); - } - @Override public PlanFactory visitLimitCommand(EsqlBaseParser.LimitCommandContext ctx) { Source source = source(ctx); @@ -330,6 +326,7 @@ public PlanFactory visitEnrichCommand(EsqlBaseParser.EnrichCommandContext ctx) { new Literal(source(ctx.policyName), policyName, DataTypes.KEYWORD), matchField, null, + Map.of(), keepClauses.isEmpty() ? List.of() : keepClauses ); }; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java index 37a0ff0fe5001..d5db90aa07325 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java @@ -8,7 +8,7 @@ package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; +import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.ql.capabilities.Resolvables; import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.EmptyAttribute; @@ -29,8 +29,9 @@ public class Enrich extends UnaryPlan { private final Expression policyName; private final NamedExpression matchField; - private final EnrichPolicyResolution policy; - private List enrichFields; + private final EnrichPolicy policy; + private final Map concreteIndices; // cluster -> enrich indices + private final List enrichFields; private List output; private final Mode mode; @@ -61,7 +62,8 @@ public Enrich( Mode mode, Expression policyName, NamedExpression matchField, - EnrichPolicyResolution policy, + EnrichPolicy policy, + Map concreteIndices, List enrichFields ) { super(source, child); @@ -69,6 +71,7 @@ public Enrich( this.policyName = policyName; this.matchField = matchField; this.policy = policy; + this.concreteIndices = concreteIndices; this.enrichFields = enrichFields; } @@ -80,10 +83,14 @@ public List enrichFields() { return enrichFields; } - public EnrichPolicyResolution policy() { + public EnrichPolicy policy() { return policy; } + public Map concreteIndices() { + return concreteIndices; + } + public Expression policyName() { return policyName; } @@ -102,12 +109,12 @@ public boolean expressionsResolved() { @Override public UnaryPlan replaceChild(LogicalPlan newChild) { - return new Enrich(source(), newChild, mode, policyName, matchField, policy, enrichFields); + return new Enrich(source(), newChild, mode, policyName, matchField, policy, concreteIndices, enrichFields); } @Override protected NodeInfo info() { - return NodeInfo.create(this, Enrich::new, child(), mode, policyName, matchField, policy, enrichFields); + return NodeInfo.create(this, Enrich::new, child(), mode, policyName, matchField, policy, concreteIndices, enrichFields); } @Override @@ -131,11 +138,12 @@ public boolean equals(Object o) { && Objects.equals(policyName, enrich.policyName) && Objects.equals(matchField, enrich.matchField) && Objects.equals(policy, enrich.policy) + && Objects.equals(concreteIndices, enrich.concreteIndices) && Objects.equals(enrichFields, enrich.enrichFields); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), mode, policyName, matchField, policy, enrichFields); + return Objects.hash(super.hashCode(), mode, policyName, matchField, policy, concreteIndices, enrichFields); } } diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java index 6f2b83ef0aa6f..0bfaa2db2be5d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java @@ -6,23 +6,25 @@ */ package org.elasticsearch.xpack.esql.plan.physical; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.NamedExpression; -import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; import java.util.List; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputAttributes; public class EnrichExec extends UnaryExec implements EstimatesRowSize { + private final Enrich.Mode mode; private final NamedExpression matchField; private final String policyName; private final String policyMatchField; - private final EsIndex enrichIndex; + private final Map concreteIndices; // cluster -> enrich index private final List enrichFields; /** @@ -32,42 +34,58 @@ public class EnrichExec extends UnaryExec implements EstimatesRowSize { * @param matchField the match field in the source data * @param policyName the enrich policy name * @param policyMatchField the match field name in the policy - * @param enrichIndex the enricy policy index (the system index created by the policy execution, not the source index) + * @param concreteIndices a map from cluster to concrete enrich indices * @param enrichFields the enrich fields */ public EnrichExec( Source source, PhysicalPlan child, + Enrich.Mode mode, NamedExpression matchField, String policyName, String policyMatchField, - EsIndex enrichIndex, + Map concreteIndices, List enrichFields ) { super(source, child); + this.mode = mode; this.matchField = matchField; this.policyName = policyName; this.policyMatchField = policyMatchField; - this.enrichIndex = enrichIndex; + this.concreteIndices = concreteIndices; this.enrichFields = enrichFields; } @Override protected NodeInfo info() { - return NodeInfo.create(this, EnrichExec::new, child(), matchField, policyName, policyMatchField, enrichIndex, enrichFields); + return NodeInfo.create( + this, + EnrichExec::new, + child(), + mode, + matchField, + policyName, + policyMatchField, + concreteIndices, + enrichFields + ); } @Override public EnrichExec replaceChild(PhysicalPlan newChild) { - return new EnrichExec(source(), newChild, matchField, policyName, policyMatchField, enrichIndex, enrichFields); + return new EnrichExec(source(), newChild, mode, matchField, policyName, policyMatchField, concreteIndices, enrichFields); + } + + public Enrich.Mode mode() { + return mode; } public NamedExpression matchField() { return matchField; } - public EsIndex enrichIndex() { - return enrichIndex; + public Map concreteIndices() { + return concreteIndices; } public List enrichFields() { @@ -99,15 +117,16 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; if (super.equals(o) == false) return false; EnrichExec that = (EnrichExec) o; - return Objects.equals(matchField, that.matchField) + return mode.equals(that.mode) + && Objects.equals(matchField, that.matchField) && Objects.equals(policyName, 
that.policyName) && Objects.equals(policyMatchField, that.policyMatchField) - && Objects.equals(enrichIndex, that.enrichIndex) + && Objects.equals(concreteIndices, that.concreteIndices) && Objects.equals(enrichFields, that.enrichFields); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), matchField, policyName, policyMatchField, enrichIndex, enrichFields); + return Objects.hash(super.hashCode(), mode, matchField, policyName, policyMatchField, concreteIndices, enrichFields); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index d79becfc8a736..8c9ab8afe41f9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.compute.Describable; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -93,7 +92,6 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.Set; import java.util.function.Function; import java.util.stream.Stream; @@ -110,6 +108,7 @@ public class LocalExecutionPlanner { private static final Logger logger = LogManager.getLogger(LocalExecutionPlanner.class); private final String sessionId; + private final String clusterAlias; private final CancellableTask parentTask; private final BigArrays bigArrays; private final BlockFactory blockFactory; @@ -122,6 +121,7 @@ public class LocalExecutionPlanner { public LocalExecutionPlanner( String sessionId, + String clusterAlias, CancellableTask parentTask, BigArrays bigArrays, BlockFactory blockFactory, @@ -133,6 +133,7 @@ public LocalExecutionPlanner( PhysicalOperationProviders physicalOperationProviders ) { this.sessionId = sessionId; + this.clusterAlias = clusterAlias; this.parentTask = parentTask; this.bigArrays = bigArrays; this.blockFactory = blockFactory; @@ -343,7 +344,7 @@ private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerConte case "version" -> TopNEncoder.VERSION; case "boolean", "null", "byte", "short", "integer", "long", "double", "float", "half_float", "datetime", "date_period", "time_duration", "object", "nested", "scaled_float", "unsigned_long", "_doc" -> TopNEncoder.DEFAULT_SORTABLE; - case "geo_point", "cartesian_point" -> TopNEncoder.DEFAULT_UNSORTABLE; + case "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" -> TopNEncoder.DEFAULT_UNSORTABLE; // unsupported fields are encoded as BytesRef, we'll use the same encoder; all values should be null at this point case "unsupported" -> TopNEncoder.UNSUPPORTED; default -> throw new EsqlIllegalArgumentException("No TopN sorting encoder for type " + inverse.get(channel).type()); @@ -456,11 +457,10 @@ private PhysicalOperation planEnrich(EnrichExec enrich, LocalExecutionPlannerCon Layout.Builder layoutBuilder = source.layout.builder(); layoutBuilder.append(enrich.enrichFields()); Layout layout = layoutBuilder.build(); - Set indices = enrich.enrichIndex().concreteIndices(); - if (indices.size() != 1) { - throw new EsqlIllegalArgumentException("Resolved enrich should have one concrete index; got " + indices); + String enrichIndex = 
enrich.concreteIndices().get(clusterAlias); + if (enrichIndex == null) { + throw new EsqlIllegalArgumentException("No concrete enrich index for cluster [" + clusterAlias + "]"); } - String enrichIndex = Iterables.get(indices, 0); return source.with( new EnrichLookupOperator.Factory( sessionId, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java index 3eea84b0bd1f9..9410e9e97d078 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.planner; +import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; @@ -142,10 +143,11 @@ private PhysicalPlan map(UnaryPlan p, PhysicalPlan child) { return new EnrichExec( enrich.source(), child, + enrich.mode(), enrich.matchField(), - enrich.policy().policyName(), - enrich.policy().policy().getMatchField(), - enrich.policy().index().get(), + BytesRefs.toString(enrich.policyName().fold()), + enrich.policy().getMatchField(), + enrich.concreteIndices(), enrich.enrichFields() ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index 1c20e55f289c3..933b0174aebc0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -197,11 +197,10 @@ static QueryBuilder detectFilter(PhysicalPlan plan, String fieldName, Predicate< /** * Map QL's {@link DataType} to the compute engine's {@link ElementType}, for sortable types only. - * This specifically excludes GEO_POINT and CARTESIAN_POINT, which are backed by DataType.LONG - * but are not themselves sortable (the long can be sorted, but the sort order is not usually useful). + * This specifically excludes spatial data types, which are not themselves sortable. 
*/ public static ElementType toSortableElementType(DataType dataType) { - if (dataType == EsqlDataTypes.GEO_POINT || dataType == EsqlDataTypes.CARTESIAN_POINT) { + if (EsqlDataTypes.isSpatial(dataType)) { return ElementType.UNKNOWN; } return toElementType(dataType); @@ -238,11 +237,7 @@ public static ElementType toElementType(DataType dataType) { if (dataType == EsQueryExec.DOC_DATA_TYPE) { return ElementType.DOC; } - // TODO: Spatial types can be read from source into BYTES_REF, or read from doc-values into LONG - if (dataType == EsqlDataTypes.GEO_POINT) { - return ElementType.BYTES_REF; - } - if (dataType == EsqlDataTypes.CARTESIAN_POINT) { + if (EsqlDataTypes.isSpatial(dataType)) { return ElementType.BYTES_REF; } throw EsqlIllegalArgumentException.illegalDataType(dataType); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index e781ed4a60c35..172fc0a3dc5cc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -155,7 +155,14 @@ public void execute( .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new)); QueryPragmas queryPragmas = configuration.pragmas(); if (dataNodePlan == null || clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0)) { - var computeContext = new ComputeContext(sessionId, List.of(), configuration, null, null); + var computeContext = new ComputeContext( + sessionId, + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + List.of(), + configuration, + null, + null + ); runCompute( rootTask, computeContext, @@ -187,7 +194,7 @@ public void execute( // run compute on the coordinator runCompute( rootTask, - new ComputeContext(sessionId, List.of(), configuration, exchangeSource, null), + new ComputeContext(sessionId, RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, List.of(), configuration, exchangeSource, null), coordinatorPlan, cancelOnFailure(rootTask, cancelled, refs.acquire()).map(driverProfiles -> { responseHeadersCollector.collect(); @@ -378,6 +385,7 @@ void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, try { LocalExecutionPlanner planner = new LocalExecutionPlanner( context.sessionId, + context.clusterAlias, task, bigArrays, blockFactory, @@ -570,13 +578,14 @@ public void messageReceived(DataNodeRequest request, TransportChannel channel, T ); final ActionListener listener = new ChannelActionListener<>(channel); final EsqlConfiguration configuration = request.configuration(); + String clusterAlias = request.clusterAlias(); acquireSearchContexts( - request.clusterAlias(), + clusterAlias, request.shardIds(), configuration, request.aliasFilters(), ActionListener.wrap(searchContexts -> { - var computeContext = new ComputeContext(sessionId, searchContexts, configuration, null, exchangeSink); + var computeContext = new ComputeContext(sessionId, clusterAlias, searchContexts, configuration, null, exchangeSink); runCompute(parentTask, computeContext, request.plan(), ActionListener.wrap(driverProfiles -> { // don't return until all pages are fetched exchangeSink.addCompletionListener( @@ -669,7 +678,7 @@ void runComputeOnRemoteCluster( ); runCompute( parentTask, - new ComputeContext(localSessionId, List.of(), configuration, exchangeSource, exchangeSink), + new 
ComputeContext(localSessionId, clusterAlias, List.of(), configuration, exchangeSource, exchangeSink), coordinatorPlan, cancelOnFailure(parentTask, cancelled, refs.acquire()).map(driverProfiles -> { responseHeadersCollector.collect(); @@ -702,6 +711,7 @@ void runComputeOnRemoteCluster( record ComputeContext( String sessionId, + String clusterAlias, List searchContexts, EsqlConfiguration configuration, ExchangeSourceHandler exchangeSource, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 284c78c6e0121..add6a0d24994c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -9,12 +9,11 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.fieldcaps.FieldCapabilities; -import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; @@ -52,7 +51,6 @@ import org.elasticsearch.xpack.ql.type.InvalidMappedField; import org.elasticsearch.xpack.ql.util.Holder; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -150,32 +148,18 @@ public void analyzedPlan(LogicalPlan parsed, ActionListener listene private void preAnalyze(LogicalPlan parsed, BiFunction action, ActionListener listener) { PreAnalyzer.PreAnalysis preAnalysis = preAnalyzer.preAnalyze(parsed); - Set policyNames = new HashSet<>(preAnalysis.policyNames); - EnrichResolution resolution = new EnrichResolution(ConcurrentCollections.newConcurrentSet(), enrichPolicyResolver.allPolicyNames()); - - ActionListener groupedListener = listener.delegateFailureAndWrap((l, unused) -> { - assert resolution.resolvedPolicies().size() == policyNames.size() - : resolution.resolvedPolicies().size() + " != " + policyNames.size(); - + enrichPolicyResolver.resolvePolicy(preAnalysis.policyNames, listener.delegateFailureAndWrap((l, enrichResolution) -> { // first we need the match_fields names from enrich policies and THEN, with an updated list of fields, we call field_caps API - var matchFields = resolution.resolvedPolicies() - .stream() - .filter(p -> p.index().isValid()) // only if the policy by the specified name was found; later the Verifier will be - // triggered - .map(p -> p.policy().getMatchField()) + var matchFields = enrichResolution.resolvedEnrichPolicies() + .stream() // triggered + .map(EnrichPolicy::getMatchField) .collect(Collectors.toSet()); - preAnalyzeIndices( parsed, - l.delegateFailureAndWrap((ll, indexResolution) -> ll.onResponse(action.apply(indexResolution, resolution))), + l.delegateFailureAndWrap((ll, indexResolution) -> ll.onResponse(action.apply(indexResolution, enrichResolution))), matchFields ); - }); - try (RefCountingListener refs = new RefCountingListener(groupedListener)) { - for (String policyName : policyNames) { - enrichPolicyResolver.resolvePolicy(policyName, 
refs.acquire(resolution.resolvedPolicies()::add)); - } - } + })); } private void preAnalyzeIndices(LogicalPlan parsed, ActionListener listener, Set enrichPolicyMatchFields) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java index eae808abb5037..e8cc5a77291bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java @@ -47,6 +47,8 @@ public final class EsqlDataTypes { public static final DataType TIME_DURATION = new DataType("TIME_DURATION", null, Integer.BYTES + Long.BYTES, false, false, false); public static final DataType GEO_POINT = new DataType("geo_point", Double.BYTES * 2, false, false, false); public static final DataType CARTESIAN_POINT = new DataType("cartesian_point", Double.BYTES * 2, false, false, false); + public static final DataType GEO_SHAPE = new DataType("geo_shape", Integer.MAX_VALUE, false, false, false); + public static final DataType CARTESIAN_SHAPE = new DataType("cartesian_shape", Integer.MAX_VALUE, false, false, false); private static final Collection TYPES = Stream.of( BOOLEAN, @@ -72,7 +74,9 @@ public final class EsqlDataTypes { VERSION, UNSIGNED_LONG, GEO_POINT, - CARTESIAN_POINT + CARTESIAN_POINT, + CARTESIAN_SHAPE, + GEO_SHAPE ).sorted(Comparator.comparing(DataType::typeName)).toList(); private static final Map NAME_TO_TYPE = TYPES.stream().collect(toUnmodifiableMap(DataType::typeName, t -> t)); @@ -83,6 +87,7 @@ public final class EsqlDataTypes { Map map = TYPES.stream().filter(e -> e.esType() != null).collect(toMap(DataType::esType, t -> t)); // ES calls this 'point', but ESQL calls it 'cartesian_point' map.put("point", CARTESIAN_POINT); + map.put("shape", CARTESIAN_SHAPE); ES_TO_TYPE = Collections.unmodifiableMap(map); } @@ -167,7 +172,7 @@ public static boolean isNullOrTimeDuration(DataType t) { } public static boolean isSpatial(DataType t) { - return t == GEO_POINT || t == CARTESIAN_POINT; + return t == GEO_POINT || t == CARTESIAN_POINT || t == GEO_SHAPE || t == CARTESIAN_SHAPE; } /** diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 39a7eee2e616d..0f6dbfb81f141 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.Driver; @@ -39,6 +40,7 @@ import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; @@ -48,7 +50,6 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import 
org.elasticsearch.xpack.esql.enrich.EnrichLookupService; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.optimizer.LocalLogicalOptimizerContext; import org.elasticsearch.xpack.esql.optimizer.LocalLogicalPlanOptimizer; @@ -89,7 +90,6 @@ import java.net.URL; import java.util.ArrayList; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -263,18 +263,18 @@ private static IndexResolution loadIndexResolution(String mappingName, String in } private static EnrichResolution loadEnrichPolicies() { - Set names = new HashSet<>(); - Set resolutions = new HashSet<>(); + EnrichResolution enrichResolution = new EnrichResolution(); for (CsvTestsDataLoader.EnrichConfig policyConfig : CsvTestsDataLoader.ENRICH_POLICIES) { EnrichPolicy policy = loadEnrichPolicyMapping(policyConfig.policyFileName()); CsvTestsDataLoader.TestsDataset sourceIndex = CSV_DATASET_MAP.get(policy.getIndices().get(0)); // this could practically work, but it's wrong: // EnrichPolicyResolution should contain the policy (system) index, not the source index - IndexResolution idxRes = loadIndexResolution(sourceIndex.mappingFileName(), sourceIndex.indexName()); - names.add(policyConfig.policyName()); - resolutions.add(new EnrichPolicyResolution(policyConfig.policyName(), policy, idxRes)); + EsIndex esIndex = loadIndexResolution(sourceIndex.mappingFileName(), sourceIndex.indexName()).get(); + var concreteIndices = Map.of(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY, Iterables.get(esIndex.concreteIndices(), 0)); + enrichResolution.addResolvedPolicy(policyConfig.policyName(), policy, concreteIndices, esIndex.mapping()); + enrichResolution.addExistingPolicies(Set.of(policyConfig.policyName())); } - return new EnrichResolution(resolutions, names); + return enrichResolution; } private static EnrichPolicy loadEnrichPolicyMapping(String policyFileName) { @@ -338,6 +338,7 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { ); LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner( sessionId, + "", new CancellableTask(1, "transport", "esql", null, TaskId.EMPTY_TASK_ID, Map.of()), bigArrays, blockFactory, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 24e356520ff3d..3b64870a15839 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParserConstructor; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; @@ -56,10 +57,13 @@ import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.stream.Stream; +import static org.elasticsearch.common.xcontent.ChunkedToXContent.wrapAsToXContent; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static 
org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.xpack.esql.action.EsqlQueryResponse.DROP_NULL_COLUMNS_OPTION; import static org.elasticsearch.xpack.esql.action.ResponseValueUtils.valuesToPage; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; @@ -152,6 +156,12 @@ private Page randomPage(List columns) { case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(randomIdentifier()).toBytesRef()); case "geo_point" -> ((BytesRefBlock.Builder) builder).appendBytesRef(GEO.asWkb(GeometryTestUtils.randomPoint())); case "cartesian_point" -> ((BytesRefBlock.Builder) builder).appendBytesRef(CARTESIAN.asWkb(ShapeTestUtils.randomPoint())); + case "geo_shape" -> ((BytesRefBlock.Builder) builder).appendBytesRef( + GEO.asWkb(GeometryTestUtils.randomGeometry(randomBoolean())) + ); + case "cartesian_shape" -> ((BytesRefBlock.Builder) builder).appendBytesRef( + CARTESIAN.asWkb(ShapeTestUtils.randomGeometry(randomBoolean())) + ); case "null" -> builder.appendNull(); case "_source" -> { try { @@ -323,28 +333,38 @@ public void testChunkResponseSizeRows() { public void testSimpleXContentColumnar() { try (EsqlQueryResponse response = simple(true)) { - assertThat(Strings.toString(response), equalTo(""" + assertThat(Strings.toString(wrapAsToXContent(response)), equalTo(""" {"columns":[{"name":"foo","type":"integer"}],"values":[[40,80]]}""")); } } + public void testSimpleXContentColumnarDropNulls() { + try (EsqlQueryResponse response = simple(true)) { + assertThat( + Strings.toString(wrapAsToXContent(response), new ToXContent.MapParams(Map.of(DROP_NULL_COLUMNS_OPTION, "true"))), + equalTo(""" + {"all_columns":[{"name":"foo","type":"integer"}],"columns":[{"name":"foo","type":"integer"}],"values":[[40,80]]}""") + ); + } + } + public void testSimpleXContentColumnarAsync() { try (EsqlQueryResponse response = simple(true, true)) { - assertThat(Strings.toString(response), equalTo(""" + assertThat(Strings.toString(wrapAsToXContent(response)), equalTo(""" {"is_running":false,"columns":[{"name":"foo","type":"integer"}],"values":[[40,80]]}""")); } } public void testSimpleXContentRows() { try (EsqlQueryResponse response = simple(false)) { - assertThat(Strings.toString(response), equalTo(""" + assertThat(Strings.toString(wrapAsToXContent(response)), equalTo(""" {"columns":[{"name":"foo","type":"integer"}],"values":[[40],[80]]}""")); } } public void testSimpleXContentRowsAsync() { try (EsqlQueryResponse response = simple(false, true)) { - assertThat(Strings.toString(response), equalTo(""" + assertThat(Strings.toString(wrapAsToXContent(response)), equalTo(""" {"is_running":false,"columns":[{"name":"foo","type":"integer"}],"values":[[40],[80]]}""")); } } @@ -366,6 +386,58 @@ public void testBasicXContentIdAndRunning() { } } + public void testNullColumnsXContentDropNulls() { + try ( + EsqlQueryResponse response = new EsqlQueryResponse( + List.of(new ColumnInfo("foo", "integer"), new ColumnInfo("all_null", "integer")), + List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock(), blockFactory.newConstantNullBlock(2))), + null, + false, + null, + false, + false + ) + ) { + assertThat( + Strings.toString(wrapAsToXContent(response), new ToXContent.MapParams(Map.of(DROP_NULL_COLUMNS_OPTION, "true"))), + equalTo("{" + """ + "all_columns":[{"name":"foo","type":"integer"},{"name":"all_null","type":"integer"}],""" + """ + 
"columns":[{"name":"foo","type":"integer"}],""" + """ + "values":[[40],[80]]}""") + ); + } + } + + /** + * This is a paranoid test to make sure the {@link Block}s produced by {@link Block.Builder} + * that contain only {@code null} entries are properly recognized by the {@link EsqlQueryResponse#DROP_NULL_COLUMNS_OPTION}. + */ + public void testNullColumnsFromBuilderXContentDropNulls() { + try (IntBlock.Builder b = blockFactory.newIntBlockBuilder(2)) { + b.appendNull(); + b.appendNull(); + try ( + EsqlQueryResponse response = new EsqlQueryResponse( + List.of(new ColumnInfo("foo", "integer"), new ColumnInfo("all_null", "integer")), + List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock(), b.build())), + null, + false, + null, + false, + false + ) + ) { + assertThat( + Strings.toString(wrapAsToXContent(response), new ToXContent.MapParams(Map.of(DROP_NULL_COLUMNS_OPTION, "true"))), + equalTo("{" + """ + "all_columns":[{"name":"foo","type":"integer"},{"name":"all_null","type":"integer"}],""" + """ + "columns":[{"name":"foo","type":"integer"}],""" + """ + "values":[[40],[80]]}""") + ); + } + } + } + private EsqlQueryResponse simple(boolean columnar) { return simple(columnar, false); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java index e357efe3fcc1f..605bfa7b05bff 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java @@ -9,7 +9,6 @@ import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.EsqlTestUtils; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; @@ -19,6 +18,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Set; import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; @@ -88,28 +88,17 @@ public static IndexResolution analyzerExpandedDefaultMapping() { } public static EnrichResolution defaultEnrichResolution() { - EnrichPolicyResolution policyRes = loadEnrichPolicyResolution( - "languages", - "language_code", - "languages_idx", - "mapping-languages.json" - ); - return new EnrichResolution(Set.of(policyRes), Set.of("languages")); + return loadEnrichPolicyResolution("languages", "language_code", "languages_idx", "mapping-languages.json"); } - public static EnrichPolicyResolution loadEnrichPolicyResolution( - String policyName, - String matchField, - String idxName, - String mappingFile - ) { + public static EnrichResolution loadEnrichPolicyResolution(String policyName, String matchField, String idxName, String mappingFile) { IndexResolution mapping = loadMapping(mappingFile, idxName); List enrichFields = new ArrayList<>(mapping.get().mapping().keySet()); enrichFields.remove(matchField); - return new EnrichPolicyResolution( - policyName, - new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(idxName), matchField, enrichFields), - mapping - ); + EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(idxName), matchField, enrichFields); + EnrichResolution enrichResolution = new EnrichResolution(); + 
enrichResolution.addResolvedPolicy(policyName, policy, Map.of("", idxName), mapping.get().mapping()); + enrichResolution.addExistingPolicies(Set.of(policyName)); + return enrichResolution; } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 90e45a0a8b5a7..56ac25a3561af 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.expression.function.aggregate.Max; import org.elasticsearch.xpack.esql.parser.ParsingException; @@ -1255,78 +1254,78 @@ public void testEmptyEsRelationOnCountStar() throws IOException { } public void testUnsupportedFieldsInStats() { - var errorMsg = "Cannot use field [shape] with unsupported type [geo_shape]"; + var errorMsg = "Cannot use field [unsupported] with unsupported type [ip_range]"; verifyUnsupported(""" from test - | stats max(shape) + | stats max(unsupported) """, errorMsg); verifyUnsupported(""" from test - | stats max(int) by shape + | stats max(int) by unsupported """, errorMsg); verifyUnsupported(""" from test - | stats max(int) by bool, shape + | stats max(int) by bool, unsupported """, errorMsg); } public void testUnsupportedFieldsInEval() { - var errorMsg = "Cannot use field [shape] with unsupported type [geo_shape]"; + var errorMsg = "Cannot use field [unsupported] with unsupported type [ip_range]"; verifyUnsupported(""" from test - | eval x = shape + | eval x = unsupported """, errorMsg); verifyUnsupported(""" from test - | eval foo = 1, x = shape + | eval foo = 1, x = unsupported """, errorMsg); verifyUnsupported(""" from test - | eval x = 1 + shape + | eval x = 1 + unsupported """, errorMsg); } public void testUnsupportedFieldsInWhere() { - var errorMsg = "Cannot use field [shape] with unsupported type [geo_shape]"; + var errorMsg = "Cannot use field [unsupported] with unsupported type [ip_range]"; verifyUnsupported(""" from test - | where shape == "[1.0, 1.0]" + | where unsupported == "[1.0, 1.0]" """, errorMsg); verifyUnsupported(""" from test - | where int > 2 and shape == "[1.0, 1.0]" + | where int > 2 and unsupported == "[1.0, 1.0]" """, errorMsg); } public void testUnsupportedFieldsInSort() { - var errorMsg = "Cannot use field [shape] with unsupported type [geo_shape]"; + var errorMsg = "Cannot use field [unsupported] with unsupported type [ip_range]"; verifyUnsupported(""" from test - | sort shape + | sort unsupported """, errorMsg); verifyUnsupported(""" from test - | sort int, shape + | sort int, unsupported """, errorMsg); } public void testUnsupportedFieldsInDissect() { - var errorMsg = "Cannot use field [shape] with unsupported type [geo_shape]"; + var errorMsg = "Cannot use field [unsupported] with unsupported type [ip_range]"; verifyUnsupported(""" from test - | dissect shape \"%{foo}\" + | dissect unsupported \"%{foo}\" """, errorMsg); } public void testUnsupportedFieldsInGrok() { - var errorMsg = "Cannot use field [shape] with unsupported type [geo_shape]"; + var errorMsg = 
"Cannot use field [unsupported] with unsupported type [ip_range]"; verifyUnsupported(""" from test - | grok shape \"%{WORD:foo}\" + | grok unsupported \"%{WORD:foo}\" """, errorMsg); } @@ -1350,7 +1349,8 @@ public void testRegexOnInt() { public void testUnsupportedTypesWithToString() { // DATE_PERIOD and TIME_DURATION types have been added, but not really patched through the engine; i.e. supported. - final String supportedTypes = "boolean or cartesian_point or datetime or geo_point or ip or numeric or string or version"; + final String supportedTypes = + "boolean or cartesian_point or cartesian_shape or datetime or geo_point or geo_shape or ip or numeric or string or version"; verifyUnsupported( "row period = 1 year | eval to_string(period)", "line 1:28: argument of [to_string(period)] must be [" + supportedTypes + "], found value [period] type [date_period]" @@ -1359,7 +1359,10 @@ public void testUnsupportedTypesWithToString() { "row duration = 1 hour | eval to_string(duration)", "line 1:30: argument of [to_string(duration)] must be [" + supportedTypes + "], found value [duration] type [time_duration]" ); - verifyUnsupported("from test | eval to_string(shape)", "line 1:28: Cannot use field [shape] with unsupported type [geo_shape]"); + verifyUnsupported( + "from test | eval to_string(unsupported)", + "line 1:28: Cannot use field [unsupported] with unsupported type [ip_range]" + ); } public void testNonExistingEnrichPolicy() { @@ -1455,10 +1458,9 @@ public void testEnrichFieldsIncludeMatchField() { IndexResolution testIndex = loadMapping("mapping-basic.json", "test"); IndexResolution languageIndex = loadMapping("mapping-languages.json", "languages"); var enrichPolicy = new EnrichPolicy("match", null, List.of("unused"), "language_code", List.of("language_code", "language_name")); - EnrichResolution enrichResolution = new EnrichResolution( - Set.of(new EnrichPolicyResolution("languages", enrichPolicy, languageIndex)), - Set.of("languages") - ); + EnrichResolution enrichResolution = new EnrichResolution(); + enrichResolution.addResolvedPolicy("languages", enrichPolicy, Map.of("", "languages"), languageIndex.get().mapping()); + enrichResolution.addExistingPolicies(Set.of("languages")); AnalyzerContext context = new AnalyzerContext(configuration(query), new EsqlFunctionRegistry(), testIndex, enrichResolution); Analyzer analyzer = new Analyzer(context, TEST_VERIFIER); LogicalPlan plan = analyze(query, analyzer); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 2596340e7f206..4c8e58fceffde 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -70,17 +70,13 @@ public void testAggsExpressionsInStatsAggs() { error("from test | stats length(first_name), count(1) by first_name") ); assertEquals( - "1:19: aggregate function's field must be an attribute or literal; found [emp_no / 2] of type [Div]", - error("from test | stats x = avg(emp_no / 2) by emp_no") + "1:23: nested aggregations [max(salary)] not allowed inside other aggregations [max(max(salary))]", + error("from test | stats max(max(salary)) by first_name") ); assertEquals( "1:25: argument of [avg(first_name)] must be [numeric], found value [first_name] type [keyword]", error("from test | stats count(avg(first_name)) by first_name") ); - 
assertEquals( - "1:19: aggregate function's field must be an attribute or literal; found [length(first_name)] of type [Length]", - error("from test | stats count(length(first_name)) by first_name") - ); assertEquals( "1:23: expected an aggregate function or group but got [emp_no + avg(emp_no)] of type [Add]", error("from test | stats x = emp_no + avg(emp_no) by emp_no") @@ -95,6 +91,17 @@ public void testAggsExpressionsInStatsAggs() { ); } + public void testAggsInsideGrouping() { + assertEquals( + "1:36: cannot use an aggregate [max(languages)] for grouping", + error("from test| stats max(languages) by max(languages)") + ); + } + + public void testAggsInsideEval() throws Exception { + assertEquals("1:29: aggregate function [max(b)] not allowed outside STATS command", error("row a = 1, b = 2 | eval x = max(b)")); + } + public void testDoubleRenamingField() { assertEquals( "1:44: Column [emp_no] renamed to [r1] and is no longer available [emp_no as r3]", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index e3ff92000ab21..ff34823aa6d88 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -130,6 +130,8 @@ public static Literal randomLiteral(DataType type) { case "version" -> randomVersion().toBytesRef(); case "geo_point" -> GEO.asWkb(GeometryTestUtils.randomPoint()); case "cartesian_point" -> CARTESIAN.asWkb(ShapeTestUtils.randomPoint()); + case "geo_shape" -> GEO.asWkb(GeometryTestUtils.randomGeometry(randomBoolean())); + case "cartesian_shape" -> CARTESIAN.asWkb(ShapeTestUtils.randomGeometry(randomBoolean())); case "null" -> null; case "_source" -> { try { @@ -909,7 +911,9 @@ private static String typeErrorMessage(boolean includeOrdinal, List expectedValue.apply((BytesRef) n), warnings); } + /** + * Generate positive test cases for a unary function operating on an {@link EsqlDataTypes#GEO_SHAPE}. + */ + public static void forUnaryGeoShape( + List suppliers, + String expectedEvaluatorToString, + DataType expectedType, + Function expectedValue, + List warnings + ) { + unary(suppliers, expectedEvaluatorToString, geoShapeCases(), expectedType, n -> expectedValue.apply((BytesRef) n), warnings); + } + + /** + * Generate positive test cases for a unary function operating on an {@link EsqlDataTypes#CARTESIAN_SHAPE}. + */ + public static void forUnaryCartesianShape( + List suppliers, + String expectedEvaluatorToString, + DataType expectedType, + Function expectedValue, + List warnings + ) { + unary(suppliers, expectedEvaluatorToString, cartesianShapeCases(), expectedType, n -> expectedValue.apply((BytesRef) n), warnings); + } + /** * Generate positive test cases for a unary function operating on an {@link DataTypes#IP}. 
*/ @@ -922,6 +948,26 @@ private static List<TypedDataSupplier> cartesianPointCases() { ); } + private static List<TypedDataSupplier> geoShapeCases() { + return List.of( + new TypedDataSupplier( + "<geo_shape>", + () -> GEO.asWkb(GeometryTestUtils.randomGeometry(ESTestCase.randomBoolean())), + EsqlDataTypes.GEO_SHAPE + ) + ); + } + + private static List<TypedDataSupplier> cartesianShapeCases() { + return List.of( + new TypedDataSupplier( + "<cartesian_shape>", + () -> CARTESIAN.asWkb(ShapeTestUtils.randomGeometry(ESTestCase.randomBoolean())), + EsqlDataTypes.CARTESIAN_SHAPE + ) + ); + } + public static List<TypedDataSupplier> ipCases() { return List.of( new TypedDataSupplier( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java new file mode 100644 index 0000000000000..961aaacab0423 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; + +public class ToCartesianShapeTests extends AbstractFunctionTestCase { + public ToCartesianShapeTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() { + // TODO multivalue fields + final String attribute = "Attribute[channel=0]"; + final Function<String, String> evaluatorName = s -> "ToCartesianShape" + s + "Evaluator[field=" + attribute + "]"; + final List<TestCaseSupplier> suppliers = new ArrayList<>(); + + TestCaseSupplier.forUnaryCartesianShape(suppliers, attribute, EsqlDataTypes.CARTESIAN_SHAPE, v -> v, List.of()); + // random strings that don't look like a cartesian shape + TestCaseSupplier.forUnaryStrings( + suppliers, + evaluatorName.apply("FromString"), + EsqlDataTypes.CARTESIAN_SHAPE, + bytesRef -> null, + bytesRef -> { + var exception = expectThrows(Exception.class, () -> CARTESIAN.wktToWkb(bytesRef.utf8ToString())); + return List.of( + "Line -1:-1: evaluation of [] failed, treating result as null. 
Only first 20 failures recorded.", + "Line -1:-1: " + exception + ); + } + ); + // strings that are cartesian_shape representations + TestCaseSupplier.unary( + suppliers, + evaluatorName.apply("FromString"), + List.of( + new TestCaseSupplier.TypedDataSupplier( + "<cartesian_shape as string>", + () -> new BytesRef(CARTESIAN.asWkt(GeometryTestUtils.randomGeometry(randomBoolean()))), + DataTypes.KEYWORD + ) + ), + EsqlDataTypes.CARTESIAN_SHAPE, + bytesRef -> CARTESIAN.wktToWkb(((BytesRef) bytesRef).utf8ToString()), + List.of() + ); + + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + } + + @Override + protected Expression build(Source source, List<Expression> args) { + return new ToCartesianShape(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java new file mode 100644 index 0000000000000..dd9fcbd4951d7 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; + +public class ToGeoShapeTests extends AbstractFunctionTestCase { + public ToGeoShapeTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() { + // TODO multivalue fields + final String attribute = "Attribute[channel=0]"; + final Function<String, String> evaluatorName = s -> "ToGeoShape" + s + "Evaluator[field=" + attribute + "]"; + final List<TestCaseSupplier> suppliers = new ArrayList<>(); + + TestCaseSupplier.forUnaryGeoShape(suppliers, attribute, EsqlDataTypes.GEO_SHAPE, v -> v, List.of()); + // random strings that don't look like a geo shape + TestCaseSupplier.forUnaryStrings( + suppliers, + evaluatorName.apply("FromString"), + EsqlDataTypes.GEO_SHAPE, + bytesRef -> null, + bytesRef -> { + var exception = expectThrows(Exception.class, () -> GEO.wktToWkb(bytesRef.utf8ToString())); + return List.of( + "Line -1:-1: evaluation of [] failed, treating result as null. 
Only first 20 failures recorded.", + "Line -1:-1: " + exception + ); + } + ); + // strings that are geo_shape representations + TestCaseSupplier.unary( + suppliers, + evaluatorName.apply("FromString"), + List.of( + new TestCaseSupplier.TypedDataSupplier( + "<geo_shape as string>", + () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean()))), + DataTypes.KEYWORD + ) + ), + EsqlDataTypes.GEO_SHAPE, + bytesRef -> GEO.wktToWkb(((BytesRef) bytesRef).utf8ToString()), + List.of() + ); + + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + } + + @Override + protected Expression build(Source source, List<Expression> args) { + return new ToGeoShape(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java index 918956de08648..9d5eed2ca2ebe 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java @@ -101,6 +101,20 @@ public static Iterable<Object[]> parameters() { wkb -> new BytesRef(CARTESIAN.wkbToWkt(wkb)), List.of() ); + TestCaseSupplier.forUnaryGeoShape( + suppliers, + "ToStringFromGeoShapeEvaluator[field=" + read + "]", + DataTypes.KEYWORD, + wkb -> new BytesRef(GEO.wkbToWkt(wkb)), + List.of() + ); + TestCaseSupplier.forUnaryCartesianShape( + suppliers, + "ToStringFromCartesianShapeEvaluator[field=" + read + "]", + DataTypes.KEYWORD, + wkb -> new BytesRef(CARTESIAN.wkbToWkt(wkb)), + List.of() + ); TestCaseSupplier.forUnaryIp( suppliers, "ToStringFromIPEvaluator[field=" + read + "]", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java index d2e7e924fb95c..ecedb00e65597 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java @@ -11,7 +11,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.ShapeTestUtils; -import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.Geometry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; @@ -415,7 +415,7 @@ protected static void geoPoints( DataType expectedDataType, BiFunction<Integer, Stream<BytesRef>, Matcher<Object>> matcher ) { - points(cases, name, evaluatorName, EsqlDataTypes.GEO_POINT, expectedDataType, GEO, GeometryTestUtils::randomPoint, matcher); + spatial(cases, name, evaluatorName, EsqlDataTypes.GEO_POINT, expectedDataType, GEO, GeometryTestUtils::randomPoint, matcher); } /** @@ -443,7 +443,7 @@ protected static void cartesianPoints( DataType expectedDataType, BiFunction<Integer, Stream<BytesRef>, Matcher<Object>> matcher ) { - points( + spatial( cases, name, evaluatorName, @@ -456,20 +456,68 @@ } /** - * Build many 
test cases with either {@code geo_point} or {@code cartesian_point} values. + * Build many test cases with {@code geo_shape} values that are converted to another type. + * This assumes that the function consumes {@code geo_shape} values and produces another type. + * For example, mv_count() can consume geo_shapes and produce an integer count. */ - protected static void points( + protected static void geoShape( + List<TestCaseSupplier> cases, + String name, + String evaluatorName, + DataType expectedDataType, + BiFunction<Integer, Stream<BytesRef>, Matcher<Object>> matcher + ) { + spatial( + cases, + name, + evaluatorName, + EsqlDataTypes.GEO_SHAPE, + expectedDataType, + GEO, + () -> GeometryTestUtils.randomGeometry(randomBoolean()), + matcher + ); + } + + /** + * Build many test cases with {@code cartesian_shape} values that are converted to another type. + * This assumes that the function consumes {@code cartesian_shape} values and produces another type. + * For example, mv_count() can consume cartesian shapes and produce an integer count. + */ + protected static void cartesianShape( + List<TestCaseSupplier> cases, + String name, + String evaluatorName, + DataType expectedDataType, + BiFunction<Integer, Stream<BytesRef>, Matcher<Object>> matcher + ) { + spatial( + cases, + name, + evaluatorName, + EsqlDataTypes.CARTESIAN_SHAPE, + expectedDataType, + CARTESIAN, + () -> ShapeTestUtils.randomGeometry(randomBoolean()), + matcher + ); + } + + /** + * Build many test cases for spatial values + */ + protected static void spatial( List<TestCaseSupplier> cases, String name, String evaluatorName, DataType dataType, DataType expectedDataType, SpatialCoordinateTypes spatial, - Supplier<Point> randomPoint, + Supplier<Geometry> randomGeometry, BiFunction<Integer, Stream<BytesRef>, Matcher<Object>> matcher ) { cases.add(new TestCaseSupplier(name + "(" + dataType.typeName() + ")", List.of(dataType), () -> { - BytesRef wkb = spatial.asWkb(randomPoint.get()); + BytesRef wkb = spatial.asWkb(randomGeometry.get()); return new TestCaseSupplier.TestCase( List.of(new TestCaseSupplier.TypedData(List.of(wkb), dataType, "field")), evaluatorName + "[field=Attribute[channel=0]]", @@ -479,7 +527,7 @@ protected static void points( })); for (Block.MvOrdering ordering : Block.MvOrdering.values()) { cases.add(new TestCaseSupplier(name + "(<" + dataType.typeName() + "s>) " + ordering, List.of(dataType), () -> { - List<Object> mvData = randomList(1, 100, () -> spatial.asWkb(randomPoint.get())); + List<Object> mvData = randomList(1, 100, () -> spatial.asWkb(randomGeometry.get())); putInOrder(mvData, ordering); return new TestCaseSupplier.TestCase( List.of(new TestCaseSupplier.TypedData(mvData, dataType, "field")), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java index 1abbd62faa0bd..342baf405d0c3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java @@ -39,6 +39,8 @@ public static Iterable<Object[]> parameters() { dateTimes(cases, "mv_count", "MvCount", DataTypes.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); geoPoints(cases, "mv_count", "MvCount", DataTypes.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); cartesianPoints(cases, "mv_count", "MvCount", DataTypes.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); + geoShape(cases, "mv_count", "MvCount", 
DataTypes.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); + cartesianShape(cases, "mv_count", "MvCount", DataTypes.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, cases))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java index 91c30b7c1f566..0f52efe20399e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java @@ -41,6 +41,8 @@ public static Iterable<Object[]> parameters() { dateTimes(cases, "mv_first", "MvFirst", DataTypes.DATETIME, (size, values) -> equalTo(values.findFirst().getAsLong())); geoPoints(cases, "mv_first", "MvFirst", EsqlDataTypes.GEO_POINT, (size, values) -> equalTo(values.findFirst().get())); cartesianPoints(cases, "mv_first", "MvFirst", EsqlDataTypes.CARTESIAN_POINT, (size, values) -> equalTo(values.findFirst().get())); + geoShape(cases, "mv_first", "MvFirst", EsqlDataTypes.GEO_SHAPE, (size, values) -> equalTo(values.findFirst().get())); + cartesianShape(cases, "mv_first", "MvFirst", EsqlDataTypes.CARTESIAN_SHAPE, (size, values) -> equalTo(values.findFirst().get())); return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java index 7577cbf7dd0a8..41abab22c72ef 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java @@ -47,6 +47,14 @@ public static Iterable<Object[]> parameters() { EsqlDataTypes.CARTESIAN_POINT, (size, values) -> equalTo(values.reduce((f, s) -> s).get()) ); + geoShape(cases, "mv_last", "MvLast", EsqlDataTypes.GEO_SHAPE, (size, values) -> equalTo(values.reduce((f, s) -> s).get())); + cartesianShape( + cases, + "mv_last", + "MvLast", + EsqlDataTypes.CARTESIAN_SHAPE, + (size, values) -> equalTo(values.reduce((f, s) -> s).get()) + ); return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java index 37ab820146bf4..71aa945594584 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java @@ -94,8 +94,8 @@ protected final void validateType(BinaryOperator<?, ?, ?, ?> op, DataType lhsTyp equalTo( String.format( Locale.ROOT, - "first argument of [%s %s] must be [numeric, keyword, text, ip, datetime, version, geo_point or 
- + "cartesian_point], found value [] type [%s]", + "first argument of [%s %s] must be [numeric, keyword, text, ip, datetime, version, geo_point, " + + "geo_shape, cartesian_point or cartesian_shape], found value [] type [%s]", lhsType.typeName(), rhsType.typeName(), lhsType.typeName() diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index 5887d61c652bb..2716c4ff5195e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.analysis.Verifier; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; @@ -133,25 +132,17 @@ public void init() { physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(config)); FunctionRegistry functionRegistry = new EsqlFunctionRegistry(); mapper = new Mapper(functionRegistry); - var enrichResolution = new EnrichResolution( - Set.of( - new EnrichPolicyResolution( - "foo", - new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("idx"), "fld", List.of("a", "b")), - IndexResolution.valid( - new EsIndex( - "idx", - Map.ofEntries( - Map.entry("a", new EsField("a", DataTypes.INTEGER, Map.of(), true)), - Map.entry("b", new EsField("b", DataTypes.LONG, Map.of(), true)) - ) - ) - ) - ) - ), - Set.of("foo") + EnrichResolution enrichResolution = new EnrichResolution(); + enrichResolution.addResolvedPolicy( + "foo", + new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("idx"), "fld", List.of("a", "b")), + Map.of("", "idx"), + Map.ofEntries( + Map.entry("a", new EsField("a", DataTypes.INTEGER, Map.of(), true)), + Map.entry("b", new EsField("b", DataTypes.LONG, Map.of(), true)) + ) ); - + enrichResolution.addExistingPolicies(Set.of("foo")); analyzer = new Analyzer( new AnalyzerContext(config, functionRegistry, getIndexResult, enrichResolution), new Verifier(new Metrics()) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 6320294d7ee54..50649f79516e5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -15,8 +15,6 @@ import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils; -import org.elasticsearch.xpack.esql.analysis.EnrichResolution; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; import 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; @@ -37,6 +35,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mod; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.parser.EsqlParser; @@ -82,7 +81,6 @@ import java.util.List; import java.util.Map; -import java.util.Set; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -98,6 +96,7 @@ import static org.elasticsearch.xpack.ql.tree.Source.EMPTY; import static org.elasticsearch.xpack.ql.type.DataTypes.INTEGER; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyArray; @@ -128,19 +127,14 @@ public static void init() { IndexResolution getIndexResult = IndexResolution.valid(test); logicalOptimizer = new LogicalPlanOptimizer(new LogicalOptimizerContext(EsqlTestUtils.TEST_CFG)); - EnrichPolicyResolution policy = AnalyzerTestUtils.loadEnrichPolicyResolution( + var enrichResolution = AnalyzerTestUtils.loadEnrichPolicyResolution( "languages_idx", "id", "languages_idx", "mapping-languages.json" ); analyzer = new Analyzer( - new AnalyzerContext( - EsqlTestUtils.TEST_CFG, - new EsqlFunctionRegistry(), - getIndexResult, - new EnrichResolution(Set.of(policy), Set.of("languages_idx", "something")) - ), + new AnalyzerContext(EsqlTestUtils.TEST_CFG, new EsqlFunctionRegistry(), getIndexResult, enrichResolution), TEST_VERIFIER ); } @@ -2768,6 +2762,102 @@ public void testIsNotNullConstraintForAliasedExpressions() { var from = as(eval.child(), EsRelation.class); } + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[emp_no%2{r}#6],[COUNT(salary{f}#12) AS c, emp_no%2{r}#6]] + * \_Eval[[emp_no{f}#7 % 2[INTEGER] AS emp_no%2]] + * \_EsRelation[test][_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, ge..] + */ + public void testNestedExpressionsInGroups() { + var plan = optimizedPlan(""" + from test + | stats c = count(salary) by emp_no % 2 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var groupings = agg.groupings(); + var aggs = agg.aggregates(); + var ref = as(groupings.get(0), ReferenceAttribute.class); + assertThat(aggs.get(1), is(ref)); + var eval = as(agg.child(), Eval.class); + assertThat(eval.fields(), hasSize(1)); + assertThat(eval.fields().get(0).toAttribute(), is(ref)); + assertThat(eval.fields().get(0).name(), is("emp_no % 2")); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[emp_no{f}#6],[COUNT(__c_COUNT@1bd45f36{r}#16) AS c, emp_no{f}#6]] + * \_Eval[[salary{f}#11 + 1[INTEGER] AS __c_COUNT@1bd45f36]] + * \_EsRelation[test][_meta_field{f}#12, emp_no{f}#6, first_name{f}#7, ge..] 
+ */ + public void testNestedExpressionsInAggs() { + var plan = optimizedPlan(""" + from test + | stats c = count(salary + 1) by emp_no + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var aggs = agg.aggregates(); + var count = aliased(aggs.get(0), Count.class); + var ref = as(count.field(), ReferenceAttribute.class); + var eval = as(agg.child(), Eval.class); + var fields = eval.fields(); + assertThat(fields, hasSize(1)); + assertThat(fields.get(0).toAttribute(), is(ref)); + var add = aliased(fields.get(0), Add.class); + assertThat(Expressions.name(add.left()), is("salary")); + } + + /** + * Limit[500[INTEGER]] + * \_Aggregate[[emp_no%2{r}#7],[COUNT(__c_COUNT@fb7855b0{r}#18) AS c, emp_no%2{r}#7]] + * \_Eval[[emp_no{f}#8 % 2[INTEGER] AS emp_no%2, 100[INTEGER] / languages{f}#11 + salary{f}#13 + 1[INTEGER] AS __c_COUNT + * @fb7855b0]] + * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] + */ + public void testNestedExpressionsInBothAggsAndGroups() { + var plan = optimizedPlan(""" + from test + | stats c = count(salary + 1 + 100 / languages) by emp_no % 2 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var groupings = agg.groupings(); + var aggs = agg.aggregates(); + var gRef = as(groupings.get(0), ReferenceAttribute.class); + assertThat(aggs.get(1), is(gRef)); + + var count = aliased(aggs.get(0), Count.class); + var aggRef = as(count.field(), ReferenceAttribute.class); + var eval = as(agg.child(), Eval.class); + var fields = eval.fields(); + assertThat(fields, hasSize(2)); + assertThat(fields.get(0).toAttribute(), is(gRef)); + assertThat(fields.get(1).toAttribute(), is(aggRef)); + + var mod = aliased(fields.get(0), Mod.class); + assertThat(Expressions.name(mod.left()), is("emp_no")); + var refs = Expressions.references(singletonList(fields.get(1))); + assertThat(Expressions.names(refs), containsInAnyOrder("languages", "salary")); + } + + public void testNestedMultiExpressionsInGroupingAndAggs() { + var plan = optimizedPlan(""" + from test + | stats count(salary + 1), max(salary + 23) by languages + 1, emp_no % 3 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.output()), contains("count(salary + 1)", "max(salary + 23)", "languages + 1", "emp_no % 3")); + } + private LogicalPlan optimizedPlan(String query) { return plan(query); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index c05e11d8d8a13..6a1bffe22cd7a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; @@ -164,25 +163,17 @@ public void init() { physicalPlanOptimizer 
= new PhysicalPlanOptimizer(new PhysicalOptimizerContext(config)); FunctionRegistry functionRegistry = new EsqlFunctionRegistry(); mapper = new Mapper(functionRegistry); - var enrichResolution = new EnrichResolution( - Set.of( - new EnrichPolicyResolution( - "foo", - new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("idx"), "fld", List.of("a", "b")), - IndexResolution.valid( - new EsIndex( - "idx", - Map.ofEntries( - Map.entry("a", new EsField("a", DataTypes.INTEGER, Map.of(), true)), - Map.entry("b", new EsField("b", DataTypes.LONG, Map.of(), true)) - ) - ) - ) - ) - ), - Set.of("foo") + EnrichResolution enrichResolution = new EnrichResolution(); + enrichResolution.addResolvedPolicy( + "foo", + new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("idx"), "fld", List.of("a", "b")), + Map.of("", "idx"), + Map.ofEntries( + Map.entry("a", new EsField("a", DataTypes.INTEGER, Map.of(), true)), + Map.entry("b", new EsField("b", DataTypes.LONG, Map.of(), true)) + ) ); - + enrichResolution.addExistingPolicies(Set.of("foo")); analyzer = new Analyzer(new AnalyzerContext(config, functionRegistry, getIndexResult, enrichResolution), TEST_VERIFIER); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 931c96a8cb8ed..4b908e815ffe3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -54,6 +54,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; import java.util.function.Function; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; @@ -226,7 +227,7 @@ public void testEvalImplicitNames() { List.of( new Alias( EMPTY, - "fn(a+1)", + "fn(a + 1)", new UnresolvedFunction(EMPTY, "fn", DEFAULT, List.of(new Add(EMPTY, attribute("a"), integer(1)))) ) ) @@ -683,6 +684,7 @@ public void testEnrich() { new Literal(EMPTY, "countries", KEYWORD), new EmptyAttribute(EMPTY), null, + Map.of(), List.of() ), processingCommand("enrich countries") @@ -696,6 +698,7 @@ public void testEnrich() { new Literal(EMPTY, "index-policy", KEYWORD), new UnresolvedAttribute(EMPTY, "field_underscore"), null, + Map.of(), List.of() ), processingCommand("enrich index-policy ON field_underscore") @@ -710,6 +713,7 @@ public void testEnrich() { new Literal(EMPTY, "countries", KEYWORD), new UnresolvedAttribute(EMPTY, "country_code"), null, + Map.of(), List.of() ), processingCommand("enrich [ccq.mode :" + mode.name() + "] countries ON country_code") diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java index 3ac1453e6ad8f..c1ef69a0bf7ca 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java @@ -120,6 +120,7 @@ private Matcher maxPageSizeMatcher(boolean estimatedRowSizeIsHuge, int private LocalExecutionPlanner planner() throws IOException { return new LocalExecutionPlanner( "test", + "", null, BigArrays.NON_RECYCLING_INSTANCE, TestBlockFactory.getNonBreakingInstance(), diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java index 1947249086568..37009c67e2c94 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; +import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.analysis.VerificationException; import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolver; import org.elasticsearch.xpack.esql.execution.PlanExecutor; @@ -54,14 +55,23 @@ public void shutdownThreadPool() throws Exception { terminate(threadPool); } + @SuppressWarnings("unchecked") + EnrichPolicyResolver mockEnrichResolver() { + EnrichPolicyResolver enrichResolver = mock(EnrichPolicyResolver.class); + doAnswer(invocation -> { + ActionListener<EnrichResolution> listener = (ActionListener<EnrichResolution>) invocation.getArguments()[1]; + listener.onResponse(new EnrichResolution()); + return null; + }).when(enrichResolver).resolvePolicy(any(), any()); + return enrichResolver; + } + public void testFailedMetric() { Client client = mock(Client.class); IndexResolver idxResolver = new IndexResolver(client, randomAlphaOfLength(10), EsqlDataTypeRegistry.INSTANCE, Set::of); var planExecutor = new PlanExecutor(idxResolver); String[] indices = new String[] { "test" }; - EnrichPolicyResolver enrichResolver = mock(EnrichPolicyResolver.class); - when(enrichResolver.allPolicyNames()).thenReturn(Set.of()); - + var enrichResolver = mockEnrichResolver(); // simulate a valid field_caps response so we can parse and correctly analyze the query FieldCapabilitiesResponse fieldCapabilitiesResponse = mock(FieldCapabilitiesResponse.class); when(fieldCapabilitiesResponse.getIndices()).thenReturn(indices); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java index c4350c8ec74d7..43dec76c7de24 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java @@ -11,8 +11,6 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.dissect.DissectParser; -import org.elasticsearch.xpack.core.enrich.EnrichPolicy; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pow; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; @@ -34,7 +32,6 @@ import org.elasticsearch.xpack.ql.expression.UnresolvedNamedExpression; import org.elasticsearch.xpack.ql.expression.UnresolvedStar; import org.elasticsearch.xpack.ql.expression.function.UnresolvedFunction; -import org.elasticsearch.xpack.ql.index.IndexResolution; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.ql.tree.Node; import org.elasticsearch.xpack.ql.tree.NodeSubclassTests; @@ -86,20 +83,6 @@ protected Object pluggableMakeArg(Class> toBuildClass, Class li return; } - var 
hits = searchResponse.getHits().getHits(); - delegate.onResponse(UnparsedModel.unparsedModelFromMap(createModelConfigMap(hits, modelId))); + delegate.onResponse(UnparsedModel.unparsedModelFromMap(createModelConfigMap(searchResponse.getHits(), modelId))); }); QueryBuilder queryBuilder = documentIdQuery(modelId); @@ -132,8 +132,7 @@ public void getModel(String modelId, ActionListener<UnparsedModel> listener) { return; } - var hits = searchResponse.getHits().getHits(); - var modelConfigs = parseHitsAsModels(hits).stream().map(UnparsedModel::unparsedModelFromMap).toList(); + var modelConfigs = parseHitsAsModels(searchResponse.getHits()).stream().map(UnparsedModel::unparsedModelFromMap).toList(); assert modelConfigs.size() == 1; delegate.onResponse(modelConfigs.get(0)); }); @@ -162,8 +161,7 @@ public void getModelsByTaskType(TaskType taskType, ActionListener<List<UnparsedModel>> listener) { return; } - var hits = searchResponse.getHits().getHits(); - var modelConfigs = parseHitsAsModels(hits).stream().map(UnparsedModel::unparsedModelFromMap).toList(); + var modelConfigs = parseHitsAsModels(searchResponse.getHits()).stream().map(UnparsedModel::unparsedModelFromMap).toList(); delegate.onResponse(modelConfigs); }); @@ -212,7 +209,7 @@ public void getAllModels(ActionListener<List<UnparsedModel>> listener) { client.search(modelSearch, searchListener); } - private List<ModelConfigMap> parseHitsAsModels(SearchHit[] hits) { + private List<ModelConfigMap> parseHitsAsModels(SearchHits hits) { var modelConfigs = new ArrayList<ModelConfigMap>(); for (var hit : hits) { modelConfigs.add(new ModelConfigMap(hit.getSourceAsMap(), Map.of())); @@ -220,8 +217,8 @@ private List<ModelConfigMap> parseHitsAsModels(SearchHit[] hits) { return modelConfigs; } - private ModelConfigMap createModelConfigMap(SearchHit[] hits, String modelId) { - Map<String, SearchHit> mappedHits = Arrays.stream(hits).collect(Collectors.toMap(hit -> { + private ModelConfigMap createModelConfigMap(SearchHits hits, String modelId) { + Map<String, SearchHit> mappedHits = Arrays.stream(hits.getHits()).collect(Collectors.toMap(hit -> { if (hit.getIndex().startsWith(InferenceIndex.INDEX_NAME)) { return InferenceIndex.INDEX_NAME; } diff --git a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java index f75dd2926059a..f595153e4d6dd 100644 --- a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java +++ b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java @@ -222,18 +222,18 @@ protected void } private SearchHits prepareSearchHits() { - SearchHit hit1 = new SearchHit(0, "1"); + SearchHit hit1 = SearchHit.unpooled(0, "1"); hit1.score(1f); hit1.shard(new SearchShardTarget("a", new ShardId("a", "indexUUID", 0), null)); - SearchHit hit2 = new SearchHit(0, "2"); + SearchHit hit2 = SearchHit.unpooled(0, "2"); hit2.score(1f); hit2.shard(new SearchShardTarget("a", new ShardId("a", "indexUUID", 0), null)); - SearchHit hit3 = new SearchHit(0, "3*"); + SearchHit hit3 = SearchHit.unpooled(0, "3*"); hit3.score(1f); hit3.shard(new SearchShardTarget("a", new ShardId("a", "indexUUID", 0), null)); - return new SearchHits(new SearchHit[] { hit1, hit2, hit3 }, new TotalHits(3L, TotalHits.Relation.EQUAL_TO), 1f); + return SearchHits.unpooled(new SearchHit[] { hit1, hit2, hit3 }, new TotalHits(3L, TotalHits.Relation.EQUAL_TO), 1f); } } diff --git 
a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java index 5f2f7cfe491ca..f9213a7fcaeb8 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java @@ -10,12 +10,12 @@ import org.elasticsearch.action.admin.cluster.snapshots.features.ResetFeatureStateRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterModule; @@ -308,7 +308,7 @@ protected PutFilterAction.Response putMlFilter(MlFilter filter) { protected static List<String> fetchAllAuditMessages(String jobId) throws Exception { RefreshRequest refreshRequest = new RefreshRequest(NotificationsIndex.NOTIFICATIONS_INDEX); - RefreshResponse refreshResponse = client().execute(RefreshAction.INSTANCE, refreshRequest).actionGet(); + BroadcastResponse refreshResponse = client().execute(RefreshAction.INSTANCE, refreshRequest).actionGet(); assertThat(refreshResponse.getStatus().getStatus(), anyOf(equalTo(200), equalTo(201))); SearchRequest searchRequest = new SearchRequestBuilder(client()).setIndices(NotificationsIndex.NOTIFICATIONS_INDEX) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java index a5c47524b6934..f28f6eff25b04 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java @@ -51,7 +51,6 @@ import static org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; public class ModelSnapshotRetentionIT extends MlNativeAutodetectIntegTestCase { @@ -191,8 +190,7 @@ private List<String> getAvailableModelStateDocIds() throws Exception { private List<String> getDocIdsFromSearch(SearchRequest searchRequest) throws Exception { List<String> docIds = new ArrayList<>(); assertResponse(client().execute(TransportSearchAction.TYPE, searchRequest), searchResponse -> { - assertThat(searchResponse.getHits(), notNullValue()); - for (SearchHit searchHit : searchResponse.getHits().getHits()) { + for 
(SearchHit searchHit : searchResponse.getHits()) { docIds.add(searchHit.getId()); } }); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java index 51f6243778517..ffe70d9747a56 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java @@ -7,12 +7,12 @@ package org.elasticsearch.xpack.ml.integration; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.license.License; @@ -109,7 +109,7 @@ public void testGetTrainedModelConfig() throws Exception { ); assertThat(exceptionHolder.get(), is(nullValue())); - AtomicReference<RefreshResponse> refreshResponseAtomicReference = new AtomicReference<>(); + AtomicReference<BroadcastResponse> refreshResponseAtomicReference = new AtomicReference<>(); blockingCall( listener -> trainedModelProvider.refreshInferenceIndex(listener), refreshResponseAtomicReference, @@ -198,7 +198,7 @@ public void testGetTrainedModelConfigWithMultiDocDefinition() throws Exception { ); blockingCall( listener -> trainedModelProvider.refreshInferenceIndex(listener), - new AtomicReference<RefreshResponse>(), + new AtomicReference<BroadcastResponse>(), new AtomicReference<>() ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 09cb8644dba4f..152d8fde8c86c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -32,6 +32,8 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; @@ -68,6 +70,7 @@ import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin; +import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.plugins.ShutdownAwarePlugin; @@ -753,6 +756,7 @@ public void loadExtensions(ExtensionLoader loader) { public static final int MAX_LOW_PRIORITY_MODELS_PER_NODE = 100; private static final Logger logger = LogManager.getLogger(MachineLearning.class); + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MachineLearning.class); private final Settings settings; private final 
boolean enabled; @@ -919,6 +923,15 @@ public Collection<?> createComponents(PluginServices services) { return List.of(new JobManagerHolder(), new MachineLearningExtensionHolder()); } + if ("darwin-x86_64".equals(Platforms.PLATFORM_NAME)) { + String msg = "The machine learning plugin will be permanently disabled on macOS x86_64 in new minor versions released " + + "from December 2024 onwards. To continue to use machine learning functionality on macOS please switch to an arm64 " + + "machine (Apple silicon). Alternatively, it will still be possible to run Elasticsearch with machine learning " + + "enabled in a Docker container on macOS x86_64."; + logger.warn(msg); + deprecationLogger.warn(DeprecationCategory.PLUGINS, "ml-darwin-x86_64", msg); + } + machineLearningExtension.get().configure(environment.settings()); this.mlUpgradeModeActionFilter.set(new MlUpgradeModeActionFilter(clusterService)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java index 8ce41262a1e1d..bcf3c1f58cfa9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java @@ -11,7 +11,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -115,7 +115,7 @@ protected void masterOperation( } // package-private for testing - static void cancelDownloadTask(Client client, String modelId, ActionListener<CancelTasksResponse> listener, TimeValue timeout) { + static void cancelDownloadTask(Client client, String modelId, ActionListener<ListTasksResponse> listener, TimeValue timeout) { logger.debug(() -> format("[%s] Checking if download task exists and cancelling it", modelId)); OriginSettingClient mlClient = new OriginSettingClient(client, ML_ORIGIN); @@ -283,11 +283,11 @@ private static void executeTaskCancellation( Client client, String modelId, TaskInfo taskInfo, - ActionListener<CancelTasksResponse> listener, + ActionListener<ListTasksResponse> listener, TimeValue timeout ) { if (taskInfo != null) { - ActionListener<CancelTasksResponse> cancelListener = ActionListener.wrap(listener::onResponse, e -> { + ActionListener<ListTasksResponse> cancelListener = ActionListener.wrap(listener::onResponse, e -> { Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof ResourceNotFoundException) { logger.debug(() -> format("[%s] Task no longer exists when attempting to cancel it", modelId)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java index 4cfcf6509faa0..be8a098ed3986 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java @@ -19,6 +19,7 @@ import 
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java
index 8ce41262a1e1d..bcf3c1f58cfa9 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java
@@ -11,7 +11,7 @@
 import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
+import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction;
@@ -115,7 +115,7 @@ protected void masterOperation(
     }
 
     // package-private for testing
-    static void cancelDownloadTask(Client client, String modelId, ActionListener<CancelTasksResponse> listener, TimeValue timeout) {
+    static void cancelDownloadTask(Client client, String modelId, ActionListener<ListTasksResponse> listener, TimeValue timeout) {
         logger.debug(() -> format("[%s] Checking if download task exists and cancelling it", modelId));
 
         OriginSettingClient mlClient = new OriginSettingClient(client, ML_ORIGIN);
@@ -283,11 +283,11 @@ private static void executeTaskCancellation(
         Client client,
         String modelId,
         TaskInfo taskInfo,
-        ActionListener<CancelTasksResponse> listener,
+        ActionListener<ListTasksResponse> listener,
         TimeValue timeout
     ) {
         if (taskInfo != null) {
-            ActionListener<CancelTasksResponse> cancelListener = ActionListener.wrap(listener::onResponse, e -> {
+            ActionListener<ListTasksResponse> cancelListener = ActionListener.wrap(listener::onResponse, e -> {
                 Throwable cause = ExceptionsHelper.unwrapCause(e);
                 if (cause instanceof ResourceNotFoundException) {
                     logger.debug(() -> format("[%s] Task no longer exists when attempting to cancel it", modelId));
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java
index 4cfcf6509faa0..be8a098ed3986 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java
@@ -19,6 +19,7 @@
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.fetch.StoredFieldsContext;
 import org.elasticsearch.search.sort.SortOrder;
@@ -126,8 +127,7 @@ protected InputStream initScroll(long startTimestamp) throws IOException {
             logger.debug("[{}] Search response was obtained", context.jobId);
             timingStatsReporter.reportSearchDuration(searchResponse.getTook());
             scrollId = searchResponse.getScrollId();
-            SearchHit hits[] = searchResponse.getHits().getHits();
-            return processAndConsumeSearchHits(hits);
+            return processAndConsumeSearchHits(searchResponse.getHits());
         } finally {
             searchResponse.decRef();
         }
@@ -184,9 +184,9 @@ private SearchRequestBuilder buildSearchRequest(long start) {
     /**
      * IMPORTANT: This is not an idempotent method. This method changes the input array by setting each element to null.
      */
-    private InputStream processAndConsumeSearchHits(SearchHit hits[]) throws IOException {
+    private InputStream processAndConsumeSearchHits(SearchHits hits) throws IOException {
 
-        if (hits == null || hits.length == 0) {
+        if (hits.getHits().length == 0) {
             hasNext = false;
             clearScroll();
             return null;
@@ -194,11 +194,10 @@ private InputStream processAndConsumeSearchHits(SearchHit hits[]) throws IOExcep
 
         BytesStreamOutput outputStream = new BytesStreamOutput();
 
-        SearchHit lastHit = hits[hits.length - 1];
+        SearchHit lastHit = hits.getAt(hits.getHits().length - 1);
         lastTimestamp = context.extractedFields.timeFieldValue(lastHit);
         try (SearchHitToJsonProcessor hitProcessor = new SearchHitToJsonProcessor(context.extractedFields, outputStream)) {
-            for (int i = 0; i < hits.length; i++) {
-                SearchHit hit = hits[i];
+            for (SearchHit hit : hits) {
                 if (isCancelled) {
                     Long timestamp = context.extractedFields.timeFieldValue(hit);
                     if (timestamp != null) {
@@ -212,9 +211,6 @@ private InputStream processAndConsumeSearchHits(SearchHit hits[]) throws IOExcep
                     }
                 }
                 hitProcessor.process(hit);
-                // hack to remove the reference from object. This object can be big and consume alot of memory.
-                // We are removing it as soon as we process it.
-                hits[i] = null;
             }
         }
         return outputStream.bytes().streamInput();
@@ -237,8 +233,7 @@ private InputStream continueScroll() throws IOException {
             logger.debug("[{}] Search response was obtained", context.jobId);
             timingStatsReporter.reportSearchDuration(searchResponse.getTook());
             scrollId = searchResponse.getScrollId();
-            SearchHit hits[] = searchResponse.getHits().getHits();
-            return processAndConsumeSearchHits(hits);
+            return processAndConsumeSearchHits(searchResponse.getHits());
         } finally {
             if (searchResponse != null) {
                 searchResponse.decRef();
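// ---- editorial sketch (not part of the patch) ----
// Why: the hunks above replace raw SearchHit[] handling (and the manual array-nulling
// hack) with the SearchHits wrapper, which is iterable and backed by ref-counted,
// possibly pooled buffers. The pattern, under only the assumptions the patch itself
// makes (SearchHits is Iterable, SearchResponse is released with decRef()):
import java.util.ArrayList;
import java.util.List;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;

class HitsConsumerSketch {
    static List<String> ids(SearchResponse searchResponse) {
        try {
            SearchHits hits = searchResponse.getHits();
            List<String> ids = new ArrayList<>(hits.getHits().length);
            for (SearchHit hit : hits) { // no array copy, no manual nulling needed
                ids.add(hit.getId());    // extract plain values instead of keeping the hit
            }
            return ids;
        } finally {
            searchResponse.decRef();     // release the pooled buffers backing the hits
        }
    }
}
// ---- end sketch ----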
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java
index 4119b23747fcb..c890ab599c380 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java
@@ -19,6 +19,7 @@
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.fetch.StoredFieldsContext;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.xpack.core.ClientHelper;
@@ -154,11 +155,11 @@ public void preview(ActionListener<List<Row>> listener) {
                 return;
             }
 
-            final SearchHit[] hits = searchResponse.getHits().getHits();
-            List<Row> rows = new ArrayList<>(hits.length);
-            for (SearchHit hit : hits) {
-                String[] extractedValues = extractValues(hit);
-                rows.add(extractedValues == null ? new Row(null, hit, true) : new Row(extractedValues, hit, false));
+            List<Row> rows = new ArrayList<>(searchResponse.getHits().getHits().length);
+            for (SearchHit hit : searchResponse.getHits().getHits()) {
+                var unpooled = hit.asUnpooled();
+                String[] extractedValues = extractValues(unpooled);
+                rows.add(extractedValues == null ? new Row(null, unpooled, true) : new Row(extractedValues, unpooled, false));
             }
             delegate.onResponse(rows);
         })
@@ -251,8 +252,8 @@ private List<Row> processSearchResponse(SearchResponse searchResponse) {
             return null;
         }
 
-        SearchHit[] hits = searchResponse.getHits().getHits();
-        List<Row> rows = new ArrayList<>(hits.length);
+        SearchHits hits = searchResponse.getHits();
+        List<Row> rows = new ArrayList<>(hits.getHits().length);
         for (SearchHit hit : hits) {
             if (isCancelled) {
                 hasNext = false;
@@ -317,12 +318,13 @@ private String[] extractProcessedValue(ProcessedField processedField, SearchHit
     }
 
     private Row createRow(SearchHit hit) {
-        String[] extractedValues = extractValues(hit);
+        var unpooled = hit.asUnpooled();
+        String[] extractedValues = extractValues(unpooled);
         if (extractedValues == null) {
-            return new Row(null, hit, true);
+            return new Row(null, unpooled, true);
         }
         boolean isTraining = trainTestSplitter.get().isTraining(extractedValues);
-        Row row = new Row(extractedValues, hit, isTraining);
+        Row row = new Row(extractedValues, unpooled, isTraining);
         LOGGER.trace(
             () -> format(
                 "[%s] Extracted row: sort key = [%s], is_training = [%s], values = %s",
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/TestDocsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/TestDocsIterator.java
index bd37706622187..9e2db58befdbf 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/TestDocsIterator.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/TestDocsIterator.java
@@ -61,7 +61,7 @@ protected FieldSortBuilder sortField() {
 
     @Override
     protected SearchHit map(SearchHit hit) {
-        return hit;
+        return hit.asUnpooled();
     }
 
     @Override
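// ---- editorial sketch (not part of the patch) ----
// Why: the hunks above copy each possibly pooled hit with asUnpooled() before storing it
// in a long-lived Row, or before returning it from an iterator. The only assumption made
// here is the one the patch itself relies on: SearchHit#asUnpooled returns a
// self-contained copy that stays valid after the originating response is released.
import org.elasticsearch.search.SearchHit;

record RetainedHitSketch(SearchHit hit) {
    static RetainedHitSketch of(SearchHit possiblyPooled) {
        // safe to keep beyond the response lifetime
        return new RetainedHitSketch(possiblyPooled.asUnpooled());
    }
}
// ---- end sketch ----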
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersister.java
index 7eef0e526eac3..2012ca87578b0 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersister.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersister.java
@@ -12,7 +12,7 @@
 import org.apache.lucene.util.RamUsageEstimator;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.LatchedActionListener;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.license.License;
@@ -157,7 +157,7 @@ private CountDownLatch storeTrainedModelDoc(TrainedModelDefinitionDoc trainedMod
         CountDownLatch latch = new CountDownLatch(1);
 
         // Latch is attached to this action as it is the last one to execute.
-        ActionListener<RefreshResponse> refreshListener = new LatchedActionListener<>(ActionListener.wrap(refreshed -> {
+        ActionListener<BroadcastResponse> refreshListener = new LatchedActionListener<>(ActionListener.wrap(refreshed -> {
             if (refreshed != null) {
                 LOGGER.debug(() -> "[" + analytics.getId() + "] refreshed inference index after model store");
             }
@@ -210,7 +210,7 @@ private CountDownLatch storeTrainedModelMetadata(TrainedModelMetadata trainedMod
         CountDownLatch latch = new CountDownLatch(1);
 
         // Latch is attached to this action as it is the last one to execute.
-        ActionListener<RefreshResponse> refreshListener = new LatchedActionListener<>(ActionListener.wrap(refreshed -> {
+        ActionListener<BroadcastResponse> refreshListener = new LatchedActionListener<>(ActionListener.wrap(refreshed -> {
             if (refreshed != null) {
                 LOGGER.debug(() -> "[" + analytics.getId() + "] refreshed inference index after model metadata store");
             }
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java
index 1b6818a8727f3..0c693ff2d34f4 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java
@@ -12,7 +12,7 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.internal.ParentTaskAssigningClient;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.tasks.TaskId;
@@ -76,7 +76,7 @@ public final void execute(ActionListener<StepResponse> listener) {
 
     protected abstract void doExecute(ActionListener<StepResponse> listener);
 
-    protected void refreshDestAsync(ActionListener<RefreshResponse> refreshListener) {
+    protected void refreshDestAsync(ActionListener<BroadcastResponse> refreshListener) {
         ParentTaskAssigningClient parentTaskClient = parentTaskClient();
         executeWithHeadersAsync(
             config.getHeaders(),
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java
index 8adf5b3f0621a..9e56387ed773e 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java
@@ -8,7 +8,7 @@
 package org.elasticsearch.xpack.ml.dataframe.steps;
 
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.internal.ParentTaskAssigningClient;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.core.TimeValue;
@@ -63,7 +63,7 @@ protected void doExecute(ActionListener<StepResponse> listener) {
             listener::onFailure
         );
 
-        ActionListener<RefreshResponse> refreshListener = ActionListener.wrap(refreshResponse -> {
+        ActionListener<BroadcastResponse> refreshListener = ActionListener.wrap(refreshResponse -> {
             // TODO This could fail with errors. In that case we get stuck with the copied index.
             // We could delete the index in case of failure or we could try building the factory before reindexing
             // to catch the error early on.
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java
index 7b27090dc302d..dbf1f3e7be3d9 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java
@@ -13,10 +13,10 @@
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.TransportIndexAction;
 import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.xcontent.ToXContent;
@@ -60,7 +60,7 @@ public Name name() {
 
     @Override
     protected void doExecute(ActionListener<StepResponse> listener) {
-        ActionListener<RefreshResponse> refreshListener = ActionListener.wrap(
+        ActionListener<BroadcastResponse> refreshListener = ActionListener.wrap(
             refreshResponse -> listener.onResponse(new StepResponse(false)),
             listener::onFailure
         );
@@ -89,7 +89,7 @@ private void indexDataCounts(ActionListener listener) {
         }
     }
 
-    private void refreshIndices(ActionListener<RefreshResponse> listener) {
+    private void refreshIndices(ActionListener<BroadcastResponse> listener) {
         RefreshRequest refreshRequest = new RefreshRequest(
             AnomalyDetectorsIndex.jobStateIndexPattern(),
             MlStatsIndex.indexPattern(),
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java
index 65ac2b678d93b..ad005e6d9ae6c 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java
@@ -11,9 +11,9 @@
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.TransportSearchAction;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.query.QueryBuilders;
@@ -85,7 +85,7 @@ protected void doExecute(ActionListener<StepResponse> listener) {
             }
         }, listener::onFailure);
 
-        ActionListener<RefreshResponse> refreshDestListener = ActionListener.wrap(
+        ActionListener<BroadcastResponse> refreshDestListener = ActionListener.wrap(
            refreshResponse -> searchIfTestDocsExist(testDocsExistListener),
            listener::onFailure
         );
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java
index 1ca78df1fad3d..0ccdd1eb64601 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java
@@ -12,8 +12,8 @@
 import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
-import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
 import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest;
+import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
 import org.elasticsearch.action.admin.indices.get.GetIndexAction;
 import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
@@ -278,7 +278,7 @@ public void cancel(String reason, TimeValue timeout) {
 
             // We need to cancel the reindexing task within context with ML origin as we started the task
             // from the same context
-            CancelTasksResponse cancelReindexResponse = cancelTaskWithinMlOriginContext(cancelReindex);
+            ListTasksResponse cancelReindexResponse = cancelTaskWithinMlOriginContext(cancelReindex);
 
             Throwable firstError = null;
             if (cancelReindexResponse.getNodeFailures().isEmpty() == false) {
@@ -296,7 +296,7 @@ public void cancel(String reason, TimeValue timeout) {
         }
     }
 
-    private CancelTasksResponse cancelTaskWithinMlOriginContext(CancelTasksRequest cancelTasksRequest) {
+    private ListTasksResponse cancelTaskWithinMlOriginContext(CancelTasksRequest cancelTasksRequest) {
         final ThreadContext threadContext = client.threadPool().getThreadContext();
         try (ThreadContext.StoredContext ignore = threadContext.stashWithOrigin(ML_ORIGIN)) {
             return client.admin().cluster().cancelTasks(cancelTasksRequest).actionGet();
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorer.java
index 068462bcdfca2..4e3fa3addaf30 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorer.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorer.java
@@ -55,6 +55,15 @@ public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext r
         if (ltrRescoreContext.regressionModelDefinition == null) {
             throw new IllegalStateException("local model reference is null, missing rewriteAndFetch before rescore phase?");
         }
+
+        if (rescoreContext.getWindowSize() < topDocs.scoreDocs.length) {
+            throw new IllegalArgumentException(
+                "Rescore window is too small and should be at least the value of from + size but was ["
+                    + rescoreContext.getWindowSize()
+                    + "]"
+            );
+        }
+
         LocalModel definition = ltrRescoreContext.regressionModelDefinition;
 
         // First take top slice of incoming docs, to be rescored:
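// ---- editorial sketch (not part of the patch) ----
// Why: the new guard in rescore() requires the rescore window to cover every incoming
// ScoreDoc, i.e. window_size must be at least from + size. The same check, isolated
// with hypothetical parameter names:
class WindowGuardSketch {
    static void requireWindowCovers(int windowSize, int incomingDocCount) {
        if (windowSize < incomingDocCount) {
            throw new IllegalArgumentException(
                "Rescore window is too small and should be at least the value of from + size but was [" + windowSize + "]"
            );
        }
    }
}
// ---- end sketch ----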
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java
index 11676cc4a1599..a5a7859a7f938 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java
@@ -32,10 +32,10 @@
 
 public class LearningToRankRescorerBuilder extends RescorerBuilder<LearningToRankRescorerBuilder> {
 
-    public static final String NAME = "learning_to_rank";
-    private static final ParseField MODEL_FIELD = new ParseField("model_id");
-    private static final ParseField PARAMS_FIELD = new ParseField("params");
-    private static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>(NAME, false, Builder::new);
+    public static final ParseField NAME = new ParseField("learning_to_rank");
+    public static final ParseField MODEL_FIELD = new ParseField("model_id");
+    public static final ParseField PARAMS_FIELD = new ParseField("params");
+    private static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>(NAME.getPreferredName(), false, Builder::new);
 
     static {
         PARSER.declareString(Builder::setModelId, MODEL_FIELD);
@@ -251,7 +251,7 @@ protected LearningToRankRescorerContext innerBuildContext(int windowSize, Search
 
     @Override
     public String getWriteableName() {
-        return NAME;
+        return NAME.getPreferredName();
     }
 
     @Override
@@ -260,6 +260,11 @@ public TransportVersion getMinimalSupportedVersion() {
         return TransportVersion.current();
     }
 
+    @Override
+    protected boolean isWindowSizeRequired() {
+        return true;
+    }
+
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
         assert localModel == null || rescoreOccurred : "Unnecessarily populated local model object";
@@ -270,7 +275,7 @@ protected void doWriteTo(StreamOutput out) throws IOException {
 
     @Override
     protected void doXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject(NAME);
+        builder.startObject(NAME.getPreferredName());
         builder.field(MODEL_FIELD.getPreferredName(), modelId);
         if (this.params != null) {
             builder.field(PARAMS_FIELD.getPreferredName(), this.params);
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java
index 11b699df66b83..0bfc64c9b0027 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java
@@ -69,7 +69,7 @@ public Vocabulary(StreamInput in) throws IOException {
         } else {
             merges = List.of();
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             scores = in.readCollectionAsList(StreamInput::readDouble);
         } else {
             scores = List.of();
@@ -95,7 +95,7 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
             out.writeStringCollection(merges);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeCollection(scores, StreamOutput::writeDouble);
         }
     }
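// ---- editorial sketch (not part of the patch) ----
// Why: V_8_500_020 -> V_8_9_X is a rename of the same wire version, and correctness
// depends on the reader and the writer gating on the identical constant. A schematic,
// self-contained read/write pair under that assumption (field and stream types as in
// the Vocabulary hunk above):
import java.io.IOException;
import java.util.List;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

class ScoresWireSketch {
    static void write(StreamOutput out, List<Double> scores) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
            out.writeCollection(scores, StreamOutput::writeDouble); // writer side
        }
    }

    static List<Double> read(StreamInput in) throws IOException {
        return in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)
            ? in.readCollectionAsList(StreamInput::readDouble)      // mirror on the reader
            : List.of();                                            // default for old peers
    }
}
// ---- end sketch ----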
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java
index d267966a1d795..b502e0d6db341 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java
@@ -16,7 +16,6 @@
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.bulk.BulkAction;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
@@ -30,6 +29,7 @@
 import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.common.CheckedBiFunction;
 import org.elasticsearch.common.Numbers;
@@ -52,6 +52,7 @@
 import org.elasticsearch.index.reindex.DeleteByQueryRequest;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.aggregations.AggregationBuilders;
 import org.elasticsearch.search.aggregations.metrics.Max;
 import org.elasticsearch.search.aggregations.metrics.Sum;
@@ -419,7 +420,7 @@ public void getTrainedModelMetadata(
         }));
     }
 
-    public void refreshInferenceIndex(ActionListener<RefreshResponse> listener) {
+    public void refreshInferenceIndex(ActionListener<BroadcastResponse> listener) {
         executeAsyncWithOrigin(
             client,
             ML_ORIGIN,
@@ -663,7 +664,7 @@ public void getTrainedModel(
         ActionListener<SearchResponse> trainedModelSearchHandler = ActionListener.wrap(modelSearchResponse -> {
             TrainedModelConfig.Builder builder;
             try {
-                builder = handleHits(modelSearchResponse.getHits().getHits(), modelId, this::parseModelConfigLenientlyFromSource).get(0);
+                builder = handleHits(modelSearchResponse.getHits(), modelId, this::parseModelConfigLenientlyFromSource).get(0);
             } catch (ResourceNotFoundException ex) {
                 getTrainedModelListener.onFailure(
                     new ResourceNotFoundException(Messages.getMessage(Messages.INFERENCE_NOT_FOUND, modelId))
@@ -701,7 +702,7 @@ public void getTrainedModel(
                 ActionListener.wrap(definitionSearchResponse -> {
                     try {
                         List<TrainedModelDefinitionDoc> docs = handleHits(
-                            definitionSearchResponse.getHits().getHits(),
+                            definitionSearchResponse.getHits(),
                             modelId,
                             (bytes, resourceId) -> ChunkedTrainedModelRestorer.parseModelDefinitionDocLenientlyFromSource(
                                 bytes,
@@ -1268,15 +1269,15 @@ private static Set matchedResourceIds(String[] tokens) {
     }
 
     private static <T> List<T> handleHits(
-        SearchHit[] hits,
+        SearchHits hits,
         String resourceId,
         CheckedBiFunction<BytesReference, String, T, Exception> parseLeniently
     ) throws Exception {
-        if (hits.length == 0) {
+        if (hits.getHits().length == 0) {
             throw new ResourceNotFoundException(resourceId);
         }
-        List<T> results = new ArrayList<>(hits.length);
-        String initialIndex = hits[0].getIndex();
+        List<T> results = new ArrayList<>(hits.getHits().length);
+        String initialIndex = hits.getAt(0).getIndex();
         for (SearchHit hit : hits) {
             // We don't want to spread across multiple backing indices
             if (hit.getIndex().equals(initialIndex)) {
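// ---- editorial sketch (not part of the patch) ----
// Why: handleHits now takes the SearchHits wrapper instead of a raw array, using
// getAt(i) and iteration in place of array indexing. A hedged mini-version of the same
// access pattern, with hypothetical names:
import java.util.ArrayList;
import java.util.List;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;

class HitsParsingSketch {
    static List<String> sameIndexIds(SearchHits hits, String resourceId) {
        if (hits.getHits().length == 0) {
            throw new ResourceNotFoundException(resourceId);
        }
        List<String> ids = new ArrayList<>(hits.getHits().length);
        String initialIndex = hits.getAt(0).getIndex();
        for (SearchHit hit : hits) {
            if (hit.getIndex().equals(initialIndex)) { // stay on one backing index
                ids.add(hit.getId());
            }
        }
        return ids;
    }
}
// ---- end sketch ----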
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java
index ac16948e32ed6..577bbe3dac6ce 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java
@@ -15,7 +15,6 @@
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
 import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.search.MultiSearchRequest;
 import org.elasticsearch.action.search.MultiSearchResponse;
@@ -23,6 +22,7 @@
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.TransportMultiSearchAction;
 import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterState;
@@ -451,7 +451,7 @@ private void deleteResultsByQuery(
     ) {
         assert indices.length > 0;
 
-        ActionListener<RefreshResponse> refreshListener = ActionListener.wrap(refreshResponse -> {
+        ActionListener<BroadcastResponse> refreshListener = ActionListener.wrap(refreshResponse -> {
             logger.info("[{}] running delete by query on [{}]", jobId, String.join(", ", indices));
             ConstantScoreQueryBuilder query = new ConstantScoreQueryBuilder(new TermQueryBuilder(Job.ID.getPreferredName(), jobId));
             DeleteByQueryRequest request = new DeleteByQueryRequest(indices).setQuery(query)
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastActionTests.java
index 92ceb536cfd43..29a8a35ff0fdd 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastActionTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastActionTests.java
@@ -86,7 +86,7 @@ private static SearchHit createForecastStatsHit(ForecastRequestStats.ForecastReq
             ForecastRequestStats.STATUS.getPreferredName(),
             new DocumentField(ForecastRequestStats.STATUS.getPreferredName(), Collections.singletonList(status.toString()))
         );
-        SearchHit hit = new SearchHit(0, "");
+        SearchHit hit = SearchHit.unpooled(0, "");
         hit.addDocumentFields(documentFields, Map.of());
         return hit;
     }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelActionTests.java
index 4f1a99f634a0a..feb35195e3e38 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelActionTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelActionTests.java
@@ -12,7 +12,6 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction;
 import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder;
-import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
 import org.elasticsearch.action.support.PlainActionFuture;
@@ -57,7 +56,7 @@ public void tearDownThreadPool() {
     public void testCancelDownloadTaskCallsListenerWithNullWhenNoTasksExist() {
         var client = mockClientWithTasksResponse(Collections.emptyList(), threadPool);
-        var listener = new PlainActionFuture<CancelTasksResponse>();
+        var listener = new PlainActionFuture<ListTasksResponse>();
 
         cancelDownloadTask(client, "modelId", listener, TIMEOUT);
 
@@ -70,13 +69,13 @@ public void testCancelDownloadTaskCallsOnFailureWithErrorWhenCancellingFailsWith
 
         doAnswer(invocationOnMock -> {
             @SuppressWarnings("unchecked")
-            ActionListener<CancelTasksResponse> listener = (ActionListener<CancelTasksResponse>) invocationOnMock.getArguments()[2];
+            ActionListener<ListTasksResponse> listener = (ActionListener<ListTasksResponse>) invocationOnMock.getArguments()[2];
             listener.onFailure(new Exception("cancel error"));
 
             return Void.TYPE;
         }).when(client).execute(same(CancelTasksAction.INSTANCE), any(), any());
 
-        var listener = new PlainActionFuture<CancelTasksResponse>();
+        var listener = new PlainActionFuture<ListTasksResponse>();
 
         cancelDownloadTask(client, "modelId", listener, TIMEOUT);
 
@@ -91,13 +90,13 @@ public void testCancelDownloadTaskCallsOnResponseNullWhenTheTaskNoLongerExistsWh
 
         doAnswer(invocationOnMock -> {
             @SuppressWarnings("unchecked")
-            ActionListener<CancelTasksResponse> listener = (ActionListener<CancelTasksResponse>) invocationOnMock.getArguments()[2];
+            ActionListener<ListTasksResponse> listener = (ActionListener<ListTasksResponse>) invocationOnMock.getArguments()[2];
            listener.onFailure(new ResourceNotFoundException("task no longer there"));
 
             return Void.TYPE;
         }).when(client).execute(same(CancelTasksAction.INSTANCE), any(), any());
 
-        var listener = new PlainActionFuture<CancelTasksResponse>();
+        var listener = new PlainActionFuture<ListTasksResponse>();
 
         cancelDownloadTask(client, "modelId", listener, TIMEOUT);
 
@@ -115,7 +114,7 @@ public void testCancelDownloadTasksCallsGetsUnableToRetrieveTaskInfoError() {
             return Void.TYPE;
         }).when(client).execute(same(TransportListTasksAction.TYPE), any(), any());
 
-        var listener = new PlainActionFuture<CancelTasksResponse>();
+        var listener = new PlainActionFuture<ListTasksResponse>();
 
         cancelDownloadTask(client, "modelId", listener, TIMEOUT);
 
@@ -127,10 +126,10 @@ public void testCancelDownloadTasksCallsGetsUnableToRetrieveTaskInfoError() {
     public void testCancelDownloadTaskCallsOnResponseWithTheCancelResponseWhenATaskExists() {
         var client = mockClientWithTasksResponse(getTaskInfoListOfOne(), threadPool);
 
-        var cancelResponse = mock(CancelTasksResponse.class);
+        var cancelResponse = mock(ListTasksResponse.class);
         mockCancelTasksResponse(client, cancelResponse);
 
-        var listener = new PlainActionFuture<CancelTasksResponse>();
+        var listener = new PlainActionFuture<ListTasksResponse>();
 
         cancelDownloadTask(client, "modelId", listener, TIMEOUT);
 
@@ -142,12 +141,12 @@ private static void mockCancelTask(Client client) {
         when(cluster.prepareCancelTasks()).thenReturn(new CancelTasksRequestBuilder(client));
     }
 
-    private static void mockCancelTasksResponse(Client client, CancelTasksResponse response) {
+    private static void mockCancelTasksResponse(Client client, ListTasksResponse response) {
         mockCancelTask(client);
 
         doAnswer(invocationOnMock -> {
             @SuppressWarnings("unchecked")
-            ActionListener<CancelTasksResponse> listener = (ActionListener<CancelTasksResponse>) invocationOnMock.getArguments()[2];
+            ActionListener<ListTasksResponse> listener = (ActionListener<ListTasksResponse>) invocationOnMock.getArguments()[2];
             listener.onResponse(response);
 
             return Void.TYPE;
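// ---- editorial sketch (not part of the patch) ----
// Why: the test changes that follow switch fixtures to SearchHit.unpooled(...) /
// SearchHits.unpooled(...) so they opt out of ref-counting, or they build a pooled
// object, hand a detached copy to the mock, and release the original with decRef().
// An assumption-only helper showing the second pattern:
import org.apache.lucene.search.TotalHits;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;

class TestFixtureSketch {
    static SearchHits detachedHits() {
        SearchHits pooled = new SearchHits(
            new SearchHit[] { SearchHit.unpooled(0) },
            new TotalHits(1, TotalHits.Relation.EQUAL_TO),
            1.0f
        );
        try {
            return pooled.asUnpooled(); // safe for a mock to hold beyond this scope
        } finally {
            pooled.decRef();
        }
    }
}
// ---- end sketch ----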
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java
index 12ce45a186d62..4bbaafa9db0cd 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java
@@ -555,7 +555,8 @@ private SearchResponse createSearchResponse(long totalHits, long earliestTime, l
         SearchResponse searchResponse = mock(SearchResponse.class);
         when(searchResponse.status()).thenReturn(RestStatus.OK);
         SearchHit[] hits = new SearchHit[(int) totalHits];
-        SearchHits searchHits = new SearchHits(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 1);
+        Arrays.fill(hits, SearchHit.unpooled(1));
+        SearchHits searchHits = SearchHits.unpooled(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 1);
         when(searchResponse.getHits()).thenReturn(searchHits);
 
         List aggs = new ArrayList<>();
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java
index bf7aa465ee604..2dd17e434cccb 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java
@@ -547,7 +547,8 @@ private SearchResponse createSearchResponse(List timestamps, List
             hits.add(hit);
         }
         SearchHits searchHits = new SearchHits(hits.toArray(SearchHits.EMPTY), new TotalHits(hits.size(), TotalHits.Relation.EQUAL_TO), 1);
-        when(searchResponse.getHits()).thenReturn(searchHits);
+        when(searchResponse.getHits()).thenReturn(searchHits.asUnpooled());
+        searchHits.decRef();
         when(searchResponse.getTook()).thenReturn(TimeValue.timeValueMillis(randomNonNegativeLong()));
         return searchResponse;
     }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java
index 63afc4ef6659c..8d8cded819e23 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java
@@ -243,10 +243,12 @@ public void testPersistProgress_ProgressDocumentCreated() throws IOException {
     }
 
     public void testPersistProgress_ProgressDocumentUpdated() throws IOException {
-        testPersistProgress(
-            new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f),
-            ".ml-state-dummy"
-        );
+        var hits = new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f);
+        try {
+            testPersistProgress(hits, ".ml-state-dummy");
+        } finally {
+            hits.decRef();
+        }
     }
 
     public void testSetFailed() throws IOException {
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java
index 7bc3d507ecf22..993e00bd4adf4 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java
@@ -654,7 +654,8 @@ private SearchResponse createSearchResponse(List field1Values, List
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/inference/InferenceRunnerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/inference/InferenceRunnerTests.java
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/inference/InferenceRunnerTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/inference/InferenceRunnerTests.java
@@ ... @@ private static Deque<SearchHit> buildSearchHits(List<Map<String, Object>> vals) {
-        return vals.stream()
-            .map(InferenceRunnerTests::fromMap)
-            .map(reference -> SearchHit.createFromMap(Collections.singletonMap("_source", reference)))
-            .collect(Collectors.toCollection(ArrayDeque::new));
+        return vals.stream().map(InferenceRunnerTests::fromMap).map(reference -> {
+            var pooled = SearchHit.createFromMap(Collections.singletonMap("_source", reference));
+            try {
+                return pooled.asUnpooled();
+            } finally {
+                pooled.decRef();
+            }
+        }).collect(Collectors.toCollection(ArrayDeque::new));
     }
 
     private static BytesReference fromMap(Map<String, Object> map) {
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersisterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersisterTests.java
index d9176b74d2d3f..c308f95d483a5 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersisterTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersisterTests.java
@@ -8,7 +8,7 @@
 package org.elasticsearch.xpack.ml.dataframe.process;
 
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.license.License;
 import org.elasticsearch.test.ESTestCase;
@@ -102,7 +102,7 @@ public void testPersistAllDocs() {
         }).when(trainedModelProvider).storeTrainedModelMetadata(any(TrainedModelMetadata.class), any(ActionListener.class));
 
         doAnswer(invocationOnMock -> {
-            ActionListener<RefreshResponse> storeListener = (ActionListener<RefreshResponse>) invocationOnMock.getArguments()[0];
+            ActionListener<BroadcastResponse> storeListener = (ActionListener<BroadcastResponse>) invocationOnMock.getArguments()[0];
             storeListener.onResponse(null);
             return null;
         }).when(trainedModelProvider).refreshInferenceIndex(any(ActionListener.class));
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoinerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoinerTests.java
index 99dfd9e919a6a..3a95a3bb65f10 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoinerTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoinerTests.java
@@ -309,7 +309,7 @@ private void givenDataFrameBatches(List<List<DataFrameDataExtractor.Row>> batche
     }
 
     private static SearchHit newHit(String json) {
-        SearchHit hit = new SearchHit(randomInt(), randomAlphaOfLength(10));
+        SearchHit hit = SearchHit.unpooled(randomInt(), randomAlphaOfLength(10));
         hit.sourceRef(new BytesArray(json));
         return hit;
     }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilderSerializationTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilderSerializationTests.java
index 79044a465442b..f52d05fc3220d 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilderSerializationTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilderSerializationTests.java
@@ -9,14 +9,19 @@
 
 import org.elasticsearch.TransportVersion;
 import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.search.rescore.RescorerBuilder;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
+import org.elasticsearch.xcontent.ToXContent;
+import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentType;
+import org.elasticsearch.xcontent.json.JsonXContent;
 import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase;
 import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider;
 import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearningToRankConfig;
@@ -25,48 +30,36 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
-import static org.elasticsearch.search.rank.RankBuilder.WINDOW_SIZE_FIELD;
 import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearningToRankConfigTests.randomLearningToRankConfig;
+import static org.hamcrest.Matchers.equalTo;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
 public class LearningToRankRescorerBuilderSerializationTests extends AbstractBWCSerializationTestCase<LearningToRankRescorerBuilder> {
 
     private static LearningToRankService learningToRankService = mock(LearningToRankService.class);
 
-    @Override
-    protected LearningToRankRescorerBuilder doParseInstance(XContentParser parser) throws IOException {
-        String fieldName = null;
-        LearningToRankRescorerBuilder rescorer = null;
-        Integer windowSize = null;
-        XContentParser.Token token = parser.nextToken();
-        assert token == XContentParser.Token.START_OBJECT;
-        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                fieldName = parser.currentName();
-            } else if (token.isValue()) {
-                if (WINDOW_SIZE_FIELD.match(fieldName, parser.getDeprecationHandler())) {
-                    windowSize = parser.intValue();
-                } else {
-                    throw new ParsingException(parser.getTokenLocation(), "rescore doesn't support [" + fieldName + "]");
+    public void testRequiredWindowSize() throws IOException {
+        for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) {
+            LearningToRankRescorerBuilder testInstance = createTestInstance();
+            try (XContentBuilder builder = JsonXContent.contentBuilder()) {
+                builder.startObject();
+                testInstance.doXContent(builder, ToXContent.EMPTY_PARAMS);
+                builder.endObject();
+
+                try (XContentParser parser = JsonXContent.jsonXContent.createParser(parserConfig(), Strings.toString(builder))) {
+                    ParsingException e = expectThrows(ParsingException.class, () -> RescorerBuilder.parseFromXContent(parser, (r) -> {}));
+                    assertThat(e.getMessage(), equalTo("window_size is required for rescorer of type [learning_to_rank]"));
                 }
-            } else if (token == XContentParser.Token.START_OBJECT) {
-                rescorer = LearningToRankRescorerBuilder.fromXContent(parser, learningToRankService);
-            } else {
-                throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "] after [" + fieldName + "]");
             }
         }
-        if (rescorer == null) {
-            throw new ParsingException(parser.getTokenLocation(), "missing rescore type");
-        }
-        if (windowSize != null) {
-            rescorer.windowSize(windowSize);
-        }
-        return rescorer;
+    }
+
+    @Override
+    protected LearningToRankRescorerBuilder doParseInstance(XContentParser parser) throws IOException {
+        return (LearningToRankRescorerBuilder) RescorerBuilder.parseFromXContent(parser, (r) -> {});
     }
 
     @Override
@@ -85,76 +78,49 @@ protected LearningToRankRescorerBuilder createTestInstance() {
             learningToRankService
         );
 
-        if (randomBoolean()) {
-            builder.windowSize(randomIntBetween(1, 10000));
-        }
+        builder.windowSize(randomIntBetween(1, 10000));
 
         return builder;
     }
 
     @Override
     protected LearningToRankRescorerBuilder createXContextTestInstance(XContentType xContentType) {
-        return new LearningToRankRescorerBuilder(randomAlphaOfLength(10), randomBoolean() ? randomParams() : null, learningToRankService);
+        return new LearningToRankRescorerBuilder(randomAlphaOfLength(10), randomBoolean() ? randomParams() : null, learningToRankService)
+            .windowSize(randomIntBetween(1, 10000));
     }
 
     @Override
     protected LearningToRankRescorerBuilder mutateInstance(LearningToRankRescorerBuilder instance) throws IOException {
-
         int i = randomInt(4);
         return switch (i) {
-            case 0 -> {
-                LearningToRankRescorerBuilder builder = new LearningToRankRescorerBuilder(
-                    randomValueOtherThan(instance.modelId(), () -> randomAlphaOfLength(10)),
-                    instance.params(),
-                    learningToRankService
-                );
-                if (instance.windowSize() != null) {
-                    builder.windowSize(instance.windowSize());
-                }
-                yield builder;
-            }
+            case 0 -> new LearningToRankRescorerBuilder(
+                randomValueOtherThan(instance.modelId(), () -> randomAlphaOfLength(10)),
+                instance.params(),
+                learningToRankService
+            ).windowSize(instance.windowSize());
             case 1 -> new LearningToRankRescorerBuilder(instance.modelId(), instance.params(), learningToRankService).windowSize(
                 randomValueOtherThan(instance.windowSize(), () -> randomIntBetween(1, 10000))
             );
-            case 2 -> {
-                LearningToRankRescorerBuilder builder = new LearningToRankRescorerBuilder(
-                    instance.modelId(),
-                    randomValueOtherThan(instance.params(), () -> (randomBoolean() ? randomParams() : null)),
-                    learningToRankService
-                );
-                if (instance.windowSize() != null) {
-                    builder.windowSize(instance.windowSize() + 1);
-                }
-                yield builder;
-            }
+            case 2 -> new LearningToRankRescorerBuilder(
                instance.modelId(),
+                randomValueOtherThan(instance.params(), () -> (randomBoolean() ? randomParams() : null)),
+                learningToRankService
+            ).windowSize(instance.windowSize());
             case 3 -> {
                 LearningToRankConfig learningToRankConfig = randomValueOtherThan(
                     instance.learningToRankConfig(),
                     () -> randomLearningToRankConfig()
                 );
-                LearningToRankRescorerBuilder builder = new LearningToRankRescorerBuilder(
-                    instance.modelId(),
-                    learningToRankConfig,
-                    null,
-                    learningToRankService
+                yield new LearningToRankRescorerBuilder(instance.modelId(), learningToRankConfig, null, learningToRankService).windowSize(
                    instance.windowSize()
                 );
-                if (instance.windowSize() != null) {
-                    builder.windowSize(instance.windowSize());
-                }
-                yield builder;
-            }
-            case 4 -> {
-                LearningToRankRescorerBuilder builder = new LearningToRankRescorerBuilder(
-                    mock(LocalModel.class),
-                    instance.learningToRankConfig(),
-                    instance.params(),
-                    learningToRankService
-                );
-                if (instance.windowSize() != null) {
-                    builder.windowSize(instance.windowSize());
-                }
-                yield builder;
             }
+            case 4 -> new LearningToRankRescorerBuilder(
+                mock(LocalModel.class),
+                instance.learningToRankConfig(),
+                instance.params(),
+                learningToRankService
+            ).windowSize(instance.windowSize());
             default -> throw new AssertionError("Unexpected random test case");
         };
     }
@@ -169,31 +135,38 @@ protected NamedXContentRegistry xContentRegistry() {
         List<NamedXContentRegistry.Entry> namedXContent = new ArrayList<>();
         namedXContent.addAll(new MlInferenceNamedXContentProvider().getNamedXContentParsers());
         namedXContent.addAll(new MlLTRNamedXContentProvider().getNamedXContentParsers());
-        namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents());
+        namedXContent.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedXContents());
+        namedXContent.add(
+            new NamedXContentRegistry.Entry(
+                RescorerBuilder.class,
+                LearningToRankRescorerBuilder.NAME,
+                (p, c) -> LearningToRankRescorerBuilder.fromXContent(p, learningToRankService)
+            )
+        );
         return new NamedXContentRegistry(namedXContent);
     }
 
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return writableRegistry();
+    }
+
     @Override
     protected NamedWriteableRegistry writableRegistry() {
         List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>(new MlInferenceNamedXContentProvider().getNamedWriteables());
         namedWriteables.addAll(new MlLTRNamedXContentProvider().getNamedWriteables());
-        namedWriteables.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables());
+        namedWriteables.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables());
+        namedWriteables.add(
+            new NamedWriteableRegistry.Entry(
+                RescorerBuilder.class,
+                LearningToRankRescorerBuilder.NAME.getPreferredName(),
+                in -> new LearningToRankRescorerBuilder(in, learningToRankService)
+            )
+        );
         return new NamedWriteableRegistry(namedWriteables);
     }
 
-    @Override
-    protected NamedWriteableRegistry getNamedWriteableRegistry() {
-        return writableRegistry();
-    }
-
     private static Map<String, String> randomParams() {
         return randomMap(1, randomIntBetween(1, 10), () -> new Tuple<>(randomIdentifier(), randomIdentifier()));
     }
-
-    private static LocalModel localModelMock() {
-        LocalModel model = mock(LocalModel.class);
-        String modelId = randomIdentifier();
-        when(model.getModelId()).thenReturn(modelId);
-        return model;
-    }
 }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java
index baae42b99640f..db81fc2db3348 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java
@@ -358,10 +358,12 @@ public void testPersistQuantilesSync_QuantilesDocumentCreated() {
     }
 
     public void testPersistQuantilesSync_QuantilesDocumentUpdated() {
-        testPersistQuantilesSync(
-            new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f),
-            ".ml-state-dummy"
-        );
+        var hits = new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f);
+        try {
+            testPersistQuantilesSync(hits, ".ml-state-dummy");
+        } finally {
+            hits.decRef();
+        }
     }
 
     @SuppressWarnings("unchecked")
@@ -397,10 +399,12 @@ public void testPersistQuantilesAsync_QuantilesDocumentCreated() {
     }
 
     public void testPersistQuantilesAsync_QuantilesDocumentUpdated() {
-        testPersistQuantilesAsync(
-            new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f),
-            ".ml-state-dummy"
-        );
+        var hits = new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f);
+        try {
+            testPersistQuantilesAsync(hits, ".ml-state-dummy");
+        } finally {
+            hits.decRef();
+        }
     }
 
     @SuppressWarnings("unchecked")
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java
index 8179a97955a57..3dcbbeb3fcce5 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java
@@ -928,7 +928,8 @@ private static SearchResponse createSearchResponse(List<Map<String, Object>> sou
             list.add(hit);
         }
         SearchHits hits = new SearchHits(list.toArray(SearchHits.EMPTY), new TotalHits(source.size(), TotalHits.Relation.EQUAL_TO), 1);
-        when(response.getHits()).thenReturn(hits);
+        when(response.getHits()).thenReturn(hits.asUnpooled());
+        hits.decRef();
 
         return response;
     }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java
index 59a79def9bd10..33e5582ec992a 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java
@@ -137,7 +137,8 @@ public MockClientBuilder prepareSearch(String indexName, List do
         SearchResponse response = mock(SearchResponse.class);
         SearchHits searchHits = new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0.0f);
-        when(response.getHits()).thenReturn(searchHits);
+        when(response.getHits()).thenReturn(searchHits.asUnpooled());
+        searchHits.decRef();
 
         doAnswer(new Answer() {
             @Override
@@ -176,7 +177,8 @@ public MockClientBuilder prepareSearchFields(String indexName, List
 
         doAnswer(new Answer() {
             @Override
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamerTests.java
index 90280bc08de17..47f7d8c65a27a 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamerTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamerTests.java
@@ -107,7 +107,8 @@ private static SearchResponse createSearchResponse(List<Map<String, Object>> sou
             hits[i++] = hit;
         }
         SearchHits searchHits = new SearchHits(hits, null, (float) 0.0);
-        when(searchResponse.getHits()).thenReturn(searchHits);
+        when(searchResponse.getHits()).thenReturn(searchHits.asUnpooled());
+        searchHits.decRef();
 
         return searchResponse;
     }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java
index 3048a1144ac55..6ec43ca2a3201 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java
@@ -97,7 +97,8 @@ static SearchResponse createSearchResponseFromHits(List<SearchHit> hits) {
             1.0f
         );
         SearchResponse searchResponse = mock(SearchResponse.class);
-        when(searchResponse.getHits()).thenReturn(searchHits);
+        when(searchResponse.getHits()).thenReturn(searchHits.asUnpooled());
+        searchHits.decRef();
         return searchResponse;
     }
 
@@ -111,7 +112,8 @@ private static SearchResponse createSearchResponse(List to
         }
         SearchHits hits = new SearchHits(hitsArray, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 1.0f);
         SearchResponse searchResponse = mock(SearchResponse.class);
-        when(searchResponse.getHits()).thenReturn(hits);
+        when(searchResponse.getHits()).thenReturn(hits.asUnpooled());
+        hits.decRef();
         return searchResponse;
     }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessorTests.java
index 520efd5e77244..a7ba148584637 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessorTests.java
@@ -124,7 +124,7 @@ public void testStateRead_StateDocumentCreated() throws IOException {
 
     public void testStateRead_StateDocumentUpdated() throws IOException {
         testStateRead(
-            new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f),
+            SearchHits.unpooled(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f),
             ".ml-state-dummy"
         );
     }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/test/SearchHitBuilder.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/test/SearchHitBuilder.java
index f2affbe6d2869..59a3b86ef0bd5 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/test/SearchHitBuilder.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/test/SearchHitBuilder.java
@@ -22,7 +22,7 @@ public class SearchHitBuilder {
     private final SearchHit hit;
 
     public SearchHitBuilder(int docId) {
-        hit = new SearchHit(docId, null);
+        hit = SearchHit.unpooled(docId, null);
     }
 
     public SearchHitBuilder addField(String name, Object value) {
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java
index 939ccde7df6c4..015614e56c02b 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java
@@ -27,7 +27,7 @@ public class TransportVersionUtilsTests extends ESTestCase {
         "Bertram",
         new CompatibilityVersions(TransportVersions.V_7_0_1, Map.of()),
         "Charles",
-        new CompatibilityVersions(TransportVersions.V_8_500_020, Map.of()),
+        new CompatibilityVersions(TransportVersions.V_8_9_X, Map.of()),
         "Dominic",
         new CompatibilityVersions(TransportVersions.V_8_0_0, Map.of())
     );
@@ -79,6 +79,6 @@ public void testIsMinTransformVersionSameAsCurrent() {
 
     public void testIsMinTransportVersionOnOrAfter() {
         assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, TransportVersions.V_7_0_0), equalTo(true));
-        assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, TransportVersions.V_8_500_020), equalTo(false));
+        assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, TransportVersions.V_8_9_X), equalTo(false));
     }
 }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java
index 4f1308e9295c2..4fded8ef8d05d 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java
@@ -172,7 +172,8 @@ protected SearchResponse createSearchResponseWithHits(String... hits) {
         SearchHits searchHits = createHits(hits);
         SearchResponse searchResponse = mock(SearchResponse.class);
         when(searchResponse.getScrollId()).thenReturn(SCROLL_ID);
-        when(searchResponse.getHits()).thenReturn(searchHits);
+        when(searchResponse.getHits()).thenReturn(searchHits.asUnpooled());
+        searchHits.decRef();
         return searchResponse;
     }
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java
index eaec54ca9c1a3..753700a7ec913 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java
@@ -77,7 +77,7 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry {
      * writes monitoring data in ECS format as of 8.0. These templates define the ECS schema as well as alias fields for the old monitoring
      * mappings that point to the corresponding ECS fields.
      */
-    public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 13;
+    public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 14;
     private static final String STACK_MONITORING_REGISTRY_VERSION_VARIABLE = "xpack.stack.monitoring.template.release.version";
     private static final String STACK_TEMPLATE_VERSION = "8";
     private static final String STACK_TEMPLATE_VERSION_VARIABLE = "xpack.stack.monitoring.template.version";
diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java
index fa28877f5b4c1..ef5198499ff09 100644
--- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java
+++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java
@@ -10,7 +10,6 @@
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.logging.log4j.LogManager;
-import org.apache.lucene.tests.util.LuceneTestCase;
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
 import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
@@ -49,7 +48,6 @@
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.instanceOf;
 
-@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103809")
 public class CancellationIT extends ProfilingTestCase {
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java
index e0e4ef2a12985..8553574d39646 100644
--- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java
+++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java
@@ -7,9 +7,6 @@
 
 package org.elasticsearch.xpack.profiling;
 
-import org.apache.lucene.tests.util.LuceneTestCase;
-
-@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103809")
 public class GetFlameGraphActionIT extends ProfilingTestCase {
public void testGetStackTracesUnfiltered() throws Exception { GetStackTracesRequest request = new GetStackTracesRequest(1000, 600.0d, 1.0d, null, null, null, null, null, null, null, null); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index 9c60a6bcdfc1c..098023ad1841a 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -7,13 +7,11 @@ package org.elasticsearch.xpack.profiling; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import java.util.List; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103809") public class GetStackTracesActionIT extends ProfilingTestCase { public void testGetStackTracesUnfiltered() throws Exception { GetStackTracesRequest request = new GetStackTracesRequest(1000, 600.0d, 1.0d, null, null, null, null, null, null, null, null); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java index 8dbab6e8c06a5..f3417dbf5d472 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java @@ -52,10 +52,11 @@ public void testNoTimeoutIfNotWaiting() throws Exception { assertFalse(response.hasData()); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104035") public void testWaitsUntilResourcesAreCreated() throws Exception { updateProfilingTemplatesEnabled(true); GetStatusAction.Request request = new GetStatusAction.Request(); + // higher timeout since we have more shards than usual + request.timeout(TimeValue.timeValueSeconds(120)); request.waitForResourcesCreated(true); GetStatusAction.Response response = client().execute(GetStatusAction.INSTANCE, request).get(); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java index 82d6f6193505d..6424c0f3ae259 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.datastreams.DataStreamsPlugin; import org.elasticsearch.license.LicenseSettings; import org.elasticsearch.plugins.Plugin; @@ -127,7 +128,8 @@ protected final void doSetupData() throws Exception { ); allIndices.add(apmTestIndex); waitForIndices(allIndices); - ensureGreen(allIndices.toArray(new String[0])); + // higher timeout since we have more shards than usual + 
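        // Editor's note, illustration only: the CO2Calculator change further down in this diff removes
        // the per-instance-type co2_factor shortcut, so getAnnualCO2Tons now always uses what was
        // previously its fallback formula (names as they appear in CO2Calculator):
        //
        //     annualCO2Tons = getKiloWattsPerCore(host) * getCO2TonsPerKWH(host) * annualCoreHours * getDatacenterPUE(host)
        //
        // which is also why CO2CalculatorTests drops its single-factor checkCO2Calculation overload
        // and asserts HOST_ID_A with the same four-factor check used for the other hosts.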
ensureGreen(TimeValue.timeValueSeconds(120), allIndices.toArray(new String[0])); bulkIndex("data/profiling-events-all.ndjson"); bulkIndex("data/profiling-stacktraces.ndjson"); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java index 1e44cba4e62b2..4013afd2002f2 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java @@ -51,12 +51,7 @@ public double getAnnualCO2Tons(String hostID, long samples) { return DEFAULT_KILOWATTS_PER_CORE * customCO2PerKWH * annualCoreHours * customDatacenterPUE; } - CostEntry costs = InstanceTypeService.getCosts(host.instanceType); - if (costs == null) { - return getKiloWattsPerCore(host) * getCO2TonsPerKWH(host) * annualCoreHours * getDatacenterPUE(host); - } - - return annualCoreHours * costs.co2Factor; // unit: metric tons + return getKiloWattsPerCore(host) * getCO2TonsPerKWH(host) * annualCoreHours * getDatacenterPUE(host); } private double getKiloWattsPerCore(HostMetadata host) { diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostEntry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostEntry.java index 6033e650072bc..8d5765fa97c51 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostEntry.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostEntry.java @@ -14,22 +14,19 @@ import java.util.Map; final class CostEntry implements ToXContentObject { - final double co2Factor; final double costFactor; - CostEntry(double co2Factor, double costFactor) { - this.co2Factor = co2Factor; + CostEntry(double costFactor) { this.costFactor = costFactor; } public static CostEntry fromSource(Map source) { - return new CostEntry((Double) source.get("co2_factor"), (Double) source.get("cost_factor")); + return new CostEntry((Double) source.get("cost_factor")); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("co2_factor", this.co2Factor); builder.field("cost_factor", this.costFactor); builder.endObject(); return builder; diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java index 004eae1395dc1..d918a0def7ebb 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.IndicesOptions; @@ -177,7 +178,17 @@ private void execute(ClusterState state, ActionListener { + // no data yet + if (e instanceof SearchPhaseExecutionException) { + log.trace("Has data check has failed.", e); + listener.onResponse( + new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, 
resourcesCreated, anyPre891Data, false) + ); + } else { + listener.onFailure(e); + } + })); } else { listener.onResponse(new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, false, anyPre891Data, false)); } diff --git a/x-pack/plugin/profiling/src/main/resources/profiling-costs.json.gz b/x-pack/plugin/profiling/src/main/resources/profiling-costs.json.gz index e54b3175c7237..1258fb7344b62 100644 Binary files a/x-pack/plugin/profiling/src/main/resources/profiling-costs.json.gz and b/x-pack/plugin/profiling/src/main/resources/profiling-costs.json.gz differ diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CO2CalculatorTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CO2CalculatorTests.java index dadd541808300..eb0a2f056044b 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CO2CalculatorTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CO2CalculatorTests.java @@ -72,17 +72,12 @@ public void testCreateFromRegularSource() { double annualCoreHours = CostCalculator.annualCoreHours(samplingDurationInSeconds, samples, 20.0d); CO2Calculator co2Calculator = new CO2Calculator(hostsTable, samplingDurationInSeconds, null, null, null, null); - checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_A, samples), annualCoreHours, 0.000002213477d); + checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_A, samples), annualCoreHours, 1.135d, 0.0002786d, 7.0d); checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_B, samples), annualCoreHours, 1.1d, 0.00004452d, 7.0d); checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_C, samples), annualCoreHours, 1.185d, 0.000410608d, 2.8d); checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_D, samples), annualCoreHours, 1.7d, 0.000379069d, 2.8d); } - private void checkCO2Calculation(double calculatedAnnualCO2Tons, double annualCoreHours, double co2Factor) { - double expectedAnnualCO2Tons = annualCoreHours * co2Factor; - assertEquals(expectedAnnualCO2Tons, calculatedAnnualCO2Tons, 0.000000000001d); - } - private void checkCO2Calculation( double calculatedAnnualCO2Tons, double annualCoreHours, diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Expressions.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Expressions.java index 41955797a7e1c..de39cfcc0c910 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Expressions.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Expressions.java @@ -28,7 +28,7 @@ public final class Expressions { private Expressions() {} public static NamedExpression wrapAsNamed(Expression exp) { - return exp instanceof NamedExpression ? (NamedExpression) exp : new Alias(exp.source(), exp.sourceText(), exp); + return exp instanceof NamedExpression ne ? ne : new Alias(exp.source(), exp.sourceText(), exp); } public static List asAttributes(List named) { @@ -136,7 +136,7 @@ public static AttributeSet references(List exps) { } public static String name(Expression e) { - return e instanceof NamedExpression ? ((NamedExpression) e).name() : e.sourceText(); + return e instanceof NamedExpression ne ? 
ne.name() : e.sourceText(); } public static boolean isNull(Expression e) { @@ -153,8 +153,8 @@ public static List names(Collection e) { } public static Attribute attribute(Expression e) { - if (e instanceof NamedExpression) { - return ((NamedExpression) e).toAttribute(); + if (e instanceof NamedExpression ne) { + return ne.toAttribute(); } return null; } @@ -175,8 +175,8 @@ public static List> aliases(List> aliases = new ArrayList<>(); for (NamedExpression ne : named) { - if (ne instanceof Alias) { - aliases.add(new Tuple<>(ne.toAttribute(), ((Alias) ne).child())); + if (ne instanceof Alias as) { + aliases.add(new Tuple<>(ne.toAttribute(), as.child())); } } return aliases; @@ -218,11 +218,11 @@ public static Pipe pipe(Expression e) { if (e.foldable()) { return new ConstantInput(e.source(), e, e.fold()); } - if (e instanceof NamedExpression) { - return new AttributeInput(e.source(), e, ((NamedExpression) e).toAttribute()); + if (e instanceof NamedExpression ne) { + return new AttributeInput(e.source(), e, ne.toAttribute()); } - if (e instanceof Function) { - return ((Function) e).asPipe(); + if (e instanceof Function f) { + return f.asPipe(); } throw new QlIllegalArgumentException("Cannot create pipe for {}", e); } diff --git a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/tree/NodeSubclassTests.java b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/tree/NodeSubclassTests.java index 6393a3d6b9d67..a62acab36bdff 100644 --- a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/tree/NodeSubclassTests.java +++ b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/tree/NodeSubclassTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.FieldAttribute; import org.elasticsearch.xpack.ql.expression.UnresolvedAttributeTests; @@ -474,6 +475,10 @@ public boolean equals(Object obj) { */ return UnresolvedAttributeTests.randomUnresolvedAttribute(); } + if (EnrichPolicy.class == argClass) { + List enrichFields = randomSubsetOf(List.of("e1", "e2", "e3")); + return new EnrichPolicy(randomFrom("match", "range"), null, List.of(), randomFrom("m1", "m2"), enrichFields); + } if (Pipe.class == argClass) { /* diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index 68b5b8953ccb7..bf979f9deabf0 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -11,12 +11,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import 
org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.common.scheduler.SchedulerEngine; @@ -164,10 +164,10 @@ protected void doSaveState(IndexerState indexerState, Map positi @Override protected void onFinish(ActionListener listener) { final RollupJobConfig jobConfig = job.getConfig(); - final ActionListener refreshResponseActionListener = new ActionListener<>() { + final ActionListener refreshResponseActionListener = new ActionListener<>() { @Override - public void onResponse(RefreshResponse refreshResponse) { + public void onResponse(BroadcastResponse refreshResponse) { logger.trace("refreshing rollup index {} successful for job {}", jobConfig.getRollupIndex(), jobConfig.getId()); listener.onResponse(null); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index 7fcde59f73088..430ba6d6faec5 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -11,10 +11,10 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.scheduler.SchedulerEngine; import org.elasticsearch.common.settings.Settings; @@ -590,7 +590,7 @@ public void testTriggerWithoutHeaders() throws Exception { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); Client client = mock(Client.class); doAnswer(invocationOnMock -> { - RefreshResponse r = new RefreshResponse(2, 2, 0, Collections.emptyList()); + BroadcastResponse r = new BroadcastResponse(2, 2, 0, Collections.emptyList()); ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(r); return null; }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any()); @@ -697,7 +697,7 @@ public void testTriggerWithHeaders() throws Exception { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), headers); Client client = mock(Client.class); doAnswer(invocationOnMock -> { - RefreshResponse r = new RefreshResponse(2, 2, 0, Collections.emptyList()); + BroadcastResponse r = new BroadcastResponse(2, 2, 0, Collections.emptyList()); ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(r); return null; }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any()); @@ -806,7 +806,7 @@ public void testSaveStateChangesIDScheme() throws Exception { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), headers); Client client = mock(Client.class); doAnswer(invocationOnMock -> { - RefreshResponse r = new RefreshResponse(2, 2, 0, Collections.emptyList()); + BroadcastResponse r = new BroadcastResponse(2, 2, 0, Collections.emptyList()); ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(r); return null; }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any()); diff --git 
a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java index 7ee81b444af46..a31d016c143ae 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.Metadata; @@ -31,7 +32,6 @@ import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest; import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheAction; import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheRequest; -import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheResponse; import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsAction; import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsRequest; import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsResponse; @@ -121,11 +121,11 @@ public void testStatsRequiresLicense() throws ExecutionException, InterruptedExc } public void testClearCacheRequiresLicense() throws ExecutionException, InterruptedException { - final ActionFuture<ClearSearchableSnapshotsCacheResponse> future = client().execute( + final ActionFuture<BroadcastResponse> future = client().execute( ClearSearchableSnapshotsCacheAction.INSTANCE, new ClearSearchableSnapshotsCacheRequest(indexName) ); - final ClearSearchableSnapshotsCacheResponse response = future.get(); + final BroadcastResponse response = future.get(); assertThat(response.getTotalShards(), greaterThan(0)); assertThat(response.getSuccessfulShards(), equalTo(0)); for (DefaultShardOperationFailedException shardFailure : response.getShardFailures()) { diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java index 8d115d0f19580..e4f9d530e83df 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.blobcache.shared.SharedBlobCacheService; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterInfoServiceUtils; +import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.DiskUsageIntegTestCase; import
org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.metadata.RepositoryMetadata; @@ -232,10 +233,8 @@ public void testHighWatermarkCanNotBeExceededOnColdNode() throws Exception { final var masterInfoService = (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance(ClusterInfoService.class); ClusterInfoServiceUtils.refresh(masterInfoService); - assertThat( - masterInfoService.getClusterInfo().getNodeMostAvailableDiskUsages().get(otherDataNodeId).getTotalBytes(), - equalTo(totalSpace) - ); + DiskUsage usage = masterInfoService.getClusterInfo().getNodeMostAvailableDiskUsages().get(otherDataNodeId); + assertThat(usage.totalBytes(), equalTo(totalSpace)); mountIndices(indicesStoresSizes.keySet(), "mounted-", repositoryName, snapshot, storage); @@ -309,10 +308,8 @@ public void testHighWatermarkCanNotBeExceededWithInitializingSearchableSnapshots ClusterInfoService.class ); ClusterInfoServiceUtils.refresh(masterInfoService); - assertThat( - masterInfoService.getClusterInfo().getNodeMostAvailableDiskUsages().get(coldNodeId).getTotalBytes(), - equalTo(totalSpace) - ); + DiskUsage usage = masterInfoService.getClusterInfo().getNodeMostAvailableDiskUsages().get(coldNodeId); + assertThat(usage.totalBytes(), equalTo(totalSpace)); String prefix = "mounted-"; mountIndices(indicesToBeMounted.keySet(), prefix, repositoryName, snapshotName, FULL_COPY); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java index 37b3ecfd36959..c1c40acbd43c5 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java @@ -8,10 +8,9 @@ package org.elasticsearch.xpack.searchablesnapshots.cache.blob; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.blobcache.shared.SharedBlobCacheService; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; @@ -139,7 +138,7 @@ public void testBlobStoreCache() throws Exception { if (randomBoolean()) { logger.info("--> force-merging index before snapshotting"); - final ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge(indexName).setMaxNumSegments(1).get(); + final BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge(indexName).setMaxNumSegments(1).get(); assertThat(forceMergeResponse.getSuccessfulShards(), equalTo(numberOfShards.totalNumShards)); assertThat(forceMergeResponse.getFailedShards(), equalTo(0)); } @@ -355,7 +354,7 @@ private Client systemClient() { private void refreshSystemIndex() { try { - final RefreshResponse refreshResponse = 
systemClient().admin().indices().prepareRefresh(SNAPSHOT_BLOB_CACHE_INDEX).get(); + final BroadcastResponse refreshResponse = systemClient().admin().indices().prepareRefresh(SNAPSHOT_BLOB_CACHE_INDEX).get(); assertThat(refreshResponse.getSuccessfulShards(), greaterThan(0)); assertThat(refreshResponse.getFailedShards(), equalTo(0)); } catch (IndexNotFoundException indexNotFoundException) { diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java index 981ffe2832e66..56074f97650f0 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java @@ -10,9 +10,9 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.blobcache.common.ByteRange; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; @@ -329,7 +329,7 @@ private long numberOfEntriesInCache() { private void refreshSystemIndex(boolean failIfNotExist) { try { - final RefreshResponse refreshResponse = systemClient().admin() + final BroadcastResponse refreshResponse = systemClient().admin() .indices() .prepareRefresh(SNAPSHOT_BLOB_CACHE_INDEX) .setIndicesOptions(failIfNotExist ? 
RefreshRequest.DEFAULT_INDICES_OPTIONS : IndicesOptions.LENIENT_EXPAND_OPEN) diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java index 42ac63579b6c6..b260f6cf2a891 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.searchablesnapshots.cache.shared; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.blobcache.shared.SharedBlobCacheService; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.ShardRouting; @@ -22,7 +23,6 @@ import org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots; import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheAction; import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheRequest; -import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheResponse; import org.elasticsearch.xpack.searchablesnapshots.action.cache.TransportSearchableSnapshotsNodeCachesStatsAction; import org.elasticsearch.xpack.searchablesnapshots.action.cache.TransportSearchableSnapshotsNodeCachesStatsAction.NodeCachesStatsResponse; import org.elasticsearch.xpack.searchablesnapshots.action.cache.TransportSearchableSnapshotsNodeCachesStatsAction.NodesCachesStatsResponse; @@ -117,7 +117,7 @@ public void testNodesCachesStats() throws Exception { assertExecutorIsIdle(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); - final ClearSearchableSnapshotsCacheResponse clearCacheResponse = client().execute( + final BroadcastResponse clearCacheResponse = client().execute( ClearSearchableSnapshotsCacheAction.INSTANCE, new ClearSearchableSnapshotsCacheRequest(mountedIndex) ).actionGet(); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheAction.java index 9628bc75cd337..f57761b806599 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheAction.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheAction.java @@ -7,13 +7,14 @@ package org.elasticsearch.xpack.searchablesnapshots.action; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; -public class ClearSearchableSnapshotsCacheAction extends ActionType<ClearSearchableSnapshotsCacheResponse> { +public class ClearSearchableSnapshotsCacheAction extends ActionType<BroadcastResponse> { public static final ClearSearchableSnapshotsCacheAction INSTANCE = new ClearSearchableSnapshotsCacheAction(); static final String NAME = "cluster:admin/xpack/searchable_snapshots/cache/clear"; private ClearSearchableSnapshotsCacheAction() { - super(NAME, ClearSearchableSnapshotsCacheResponse::new); +
super(NAME, BroadcastResponse::new); } } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheResponse.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheResponse.java deleted file mode 100644 index 23a566f23d71b..0000000000000 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheResponse.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.searchablesnapshots.action; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; -import java.util.List; - -public class ClearSearchableSnapshotsCacheResponse extends BroadcastResponse { - - ClearSearchableSnapshotsCacheResponse(StreamInput in) throws IOException { - super(in); - } - - ClearSearchableSnapshotsCacheResponse( - int totalShards, - int successfulShards, - int failedShards, - List shardFailures - ) { - super(totalShards, successfulShards, failedShards, shardFailures); - } -} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportClearSearchableSnapshotsCacheAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportClearSearchableSnapshotsCacheAction.java index 8a4d21b4a98b8..077ee165d58ef 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportClearSearchableSnapshotsCacheAction.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportClearSearchableSnapshotsCacheAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.searchablesnapshots.action; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction.EmptyResult; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -24,7 +25,7 @@ public class TransportClearSearchableSnapshotsCacheAction extends AbstractTransportSearchableSnapshotsAction< ClearSearchableSnapshotsCacheRequest, - ClearSearchableSnapshotsCacheResponse, + BroadcastResponse, EmptyResult> { @Inject @@ -56,11 +57,11 @@ protected EmptyResult readShardResult(StreamInput in) { } @Override - protected ResponseFactory getResponseFactory( + protected ResponseFactory getResponseFactory( ClearSearchableSnapshotsCacheRequest request, ClusterState clusterState ) { - return (totalShards, successfulShards, failedShards, emptyResults, shardFailures) -> new ClearSearchableSnapshotsCacheResponse( + return (totalShards, successfulShards, failedShards, emptyResults, shardFailures) -> new BroadcastResponse( totalShards, successfulShards, failedShards, diff --git 
a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index cab0c2bff28f0..453f489240f77 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -282,6 +282,7 @@ public class Constants { "cluster:admin/xpack/security/user/change_password", "cluster:admin/xpack/security/user/delete", "cluster:admin/xpack/security/user/get", + "cluster:admin/xpack/security/user/query", "cluster:admin/xpack/security/user/has_privileges", "cluster:admin/xpack/security/user/list_privileges", "cluster:admin/xpack/security/user/put", diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryUserIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryUserIT.java new file mode 100644 index 0000000000000..8e6290163efcd --- /dev/null +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryUserIT.java @@ -0,0 +1,490 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.core.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.hamcrest.core.IsNot.not; + +public class QueryUserIT extends SecurityInBasicRestTestCase { + + private static final String READ_USERS_USER_AUTH_HEADER = "Basic cmVhZF91c2Vyc191c2VyOnJlYWQtdXNlcnMtcGFzc3dvcmQ="; + private static final String TEST_USER_NO_READ_USERS_AUTH_HEADER = "Basic c2VjdXJpdHlfdGVzdF91c2VyOnNlY3VyaXR5LXRlc3QtcGFzc3dvcmQ="; + + private static final Set reservedUsers = Set.of( + "elastic", + "kibana", + "kibana_system", + "logstash_system", + "beats_system", + "apm_system", + "remote_monitoring_user" + ); + + private Request queryUserRequestWithAuth() { + final Request request = new Request(randomFrom("POST", "GET"), "/_security/_query/user"); + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_USERS_USER_AUTH_HEADER)); + return request; + } + + public void testQuery() throws IOException { + // No users to match yet + assertQuery("", users -> 
assertThat(users, empty())); + + int randomUserCount = createRandomUsers().size(); + + // An empty request body means search for all users (page size = 10) + assertQuery("", users -> assertThat(users.size(), equalTo(Math.min(randomUserCount, 10)))); + + // Match all + assertQuery( + String.format(""" + {"query":{"match_all":{}},"from":0,"size":%s}""", randomUserCount), + users -> assertThat(users.size(), equalTo(randomUserCount)) + ); + + // Exists query + String field = randomFrom("username", "full_name", "roles", "enabled"); + assertQuery( + String.format(""" + {"query":{"exists":{"field":"%s"}},"from":0,"size":%s}""", field, randomUserCount), + users -> assertEquals(users.size(), randomUserCount) + ); + + // Prefix search + User prefixUser1 = createUser( + "mr-prefix1", + new String[] { "master-of-the-universe", "some-other-role" }, + "Prefix1", + "email@something.com", + Map.of(), + true + ); + User prefixUser2 = createUser( + "mr-prefix2", + new String[] { "master-of-the-world", "some-other-role" }, + "Prefix2", + "email@something.com", + Map.of(), + true + ); + assertQuery(""" + {"query":{"bool":{"must":[{"prefix":{"roles":"master-of-the"}}]}}}""", returnedUsers -> { + assertThat(returnedUsers, hasSize(2)); + assertUser(prefixUser1, returnedUsers.get(0)); + assertUser(prefixUser2, returnedUsers.get(1)); + }); + + // Wildcard search + assertQuery(""" + { "query": { "wildcard": {"username": "mr-prefix*"} } }""", users -> { + assertThat(users.size(), equalTo(2)); + assertUser(prefixUser1, users.get(0)); + assertUser(prefixUser2, users.get(1)); + users.forEach(k -> assertThat(k, not(hasKey("_sort")))); + }); + + // Terms query + assertQuery(""" + {"query":{"terms":{"roles":["some-other-role"]}}}""", users -> { + assertThat(users.size(), equalTo(2)); + assertUser(prefixUser1, users.get(0)); + assertUser(prefixUser2, users.get(1)); + }); + + // Test other fields + User otherFieldsTestUser = createUser( + "batman-official-user", + new String[] { "bat-cave-admin" }, + "Batman", + "batman@hotmail.com", + Map.of(), + true + ); + String enabledTerm = "\"enabled\":true"; + String fullNameTerm = "\"full_name\":\"batman\""; + String emailTerm = "\"email\":\"batman@hotmail.com\""; + + final String term = randomFrom(enabledTerm, fullNameTerm, emailTerm); + assertQuery( + Strings.format(""" + {"query":{"term":{%s}},"size":100}""", term), + users -> assertThat( + users.stream().map(u -> u.get(User.Fields.USERNAME.getPreferredName()).toString()).toList(), + hasItem("batman-official-user") + ) + ); + + // Test complex query + assertQuery(""" + { "query": {"bool": {"must": [ + {"wildcard": {"username": "batman-official*"}}, + {"term": {"enabled": true}}],"filter": [{"prefix": {"roles": "bat-cave"}}]}}}""", users -> { + assertThat(users.size(), equalTo(1)); + assertUser(otherFieldsTestUser, users.get(0)); + }); + + // Search for fields outside the allowlist fails + assertQueryError(400, """ + { "query": { "prefix": {"not_allowed": "ABC"} } }"""); + + // Search for fields that are not allowed in Query DSL but used internally by the service itself + final String fieldName = randomFrom("type", "password"); + assertQueryError(400, Strings.format(""" + { "query": { "term": {"%s": "%s"} } }""", fieldName, randomAlphaOfLengthBetween(3, 8))); + + // User without read_security gets 403 trying to search Users + assertQueryError(TEST_USER_NO_READ_USERS_AUTH_HEADER, 403, """ + { "query": { "wildcard": {"name": "*prefix*"} } }"""); + + // Range query not supported + assertQueryError(400, """ + 
{"query":{"range":{"username":{"lt":"now"}}}}"""); + + // IDs query not supported + assertQueryError(400, """ + { "query": { "ids": { "values": "abc" } } }"""); + + // Make sure we can't query reserved users + String reservedUsername = getReservedUsernameAndAssertExists(); + assertQuery(String.format(""" + {"query":{"term":{"username":"%s"}}}""", reservedUsername), users -> assertTrue(users.isEmpty())); + } + + public void testPagination() throws IOException { + final List users = createRandomUsers(); + + final int from = randomIntBetween(0, 3); + final int size = randomIntBetween(2, 5); + final int remaining = users.size() - from; + + // Using string only sorting to simplify test + final String sortField = "username"; + final List> allUserInfos = new ArrayList<>(remaining); + { + Request request = queryUserRequestWithAuth(); + request.setJsonEntity("{\"from\":" + from + ",\"size\":" + size + ",\"sort\":[\"" + sortField + "\"]}"); + allUserInfos.addAll(collectUsers(request, users.size())); + } + // first batch should be a full page + assertThat(allUserInfos.size(), equalTo(size)); + + while (allUserInfos.size() < remaining) { + final Request request = queryUserRequestWithAuth(); + final List sortValues = extractSortValues(allUserInfos.get(allUserInfos.size() - 1)); + + request.setJsonEntity(Strings.format(""" + {"size":%s,"sort":["%s"],"search_after":["%s"]} + """, size, sortField, sortValues.get(0))); + final List> userInfoPage = collectUsers(request, users.size()); + + if (userInfoPage.isEmpty() && allUserInfos.size() < remaining) { + fail("fail to retrieve all Users, expect [" + remaining + "], got [" + allUserInfos + "]"); + } + allUserInfos.addAll(userInfoPage); + + // Before all users are retrieved, each page should be a full page + if (allUserInfos.size() < remaining) { + assertThat(userInfoPage.size(), equalTo(size)); + } + } + + // Assert sort values match the field of User information + assertThat( + allUserInfos.stream().map(m -> m.get(sortField)).toList(), + equalTo(allUserInfos.stream().map(m -> extractSortValues(m).get(0)).toList()) + ); + + // Assert that all users match the created users and that they're sorted correctly + assertUsers(users, allUserInfos, sortField, from); + + // size can be zero, but total should still reflect the number of users matched + final Request request = queryUserRequestWithAuth(); + request.setJsonEntity("{\"size\":0}"); + final Response response = client().performRequest(request); + assertOK(response); + final Map responseMap = responseAsMap(response); + assertThat(responseMap.get("total"), equalTo(users.size())); + assertThat(responseMap.get("count"), equalTo(0)); + } + + @SuppressWarnings("unchecked") + public void testSort() throws IOException { + final List testUsers = List.of( + createUser("a", new String[] { "4", "5", "6" }), + createUser("b", new String[] { "5", "6" }), + createUser("c", new String[] { "7", "8" }) + ); + assertQuery(""" + {"sort":[{"username":{"order":"desc"}}]}""", users -> { + assertThat(users.size(), equalTo(3)); + for (int i = 2, j = 0; i >= 0; i--, j++) { + assertUser(testUsers.get(j), users.get(i)); + assertThat(users.get(i).get("username"), equalTo(((List) users.get(i).get("_sort")).get(0))); + } + }); + + assertQuery(""" + {"sort":[{"username":{"order":"asc"}}]}""", users -> { + assertThat(users.size(), equalTo(3)); + for (int i = 0; i <= 2; i++) { + assertUser(testUsers.get(i), users.get(i)); + assertThat(users.get(i).get("username"), equalTo(((List) users.get(i).get("_sort")).get(0))); + } + }); + + 
assertQuery(""" + {"sort":[{"roles":{"order":"asc"}}]}""", users -> { + assertThat(users.size(), equalTo(3)); + for (int i = 0; i <= 2; i++) { + assertUser(testUsers.get(i), users.get(i)); + // Only first element of array is used for sorting + assertThat(((List) users.get(i).get("roles")).get(0), equalTo(((List) users.get(i).get("_sort")).get(0))); + } + }); + + // Make sure sorting on _doc works + assertQuery(""" + {"sort":["_doc"]}""", users -> assertThat(users.size(), equalTo(3))); + + // Make sure multi-field sorting works + assertQuery(""" + {"sort":[{"username":{"order":"asc"}}, {"roles":{"order":"asc"}}]}""", users -> { + assertThat(users.size(), equalTo(3)); + for (int i = 0; i <= 2; i++) { + assertUser(testUsers.get(i), users.get(i)); + assertThat(users.get(i).get("username"), equalTo(((List) users.get(i).get("_sort")).get(0))); + assertThat(((List) users.get(i).get("roles")).get(0), equalTo(((List) users.get(i).get("_sort")).get(1))); + } + }); + + final String invalidFieldName = randomFrom("doc_type", "invalid", "password"); + assertQueryError(400, "{\"sort\":[\"" + invalidFieldName + "\"]}"); + + final String invalidSortName = randomFrom("email", "full_name"); + assertQueryError( + READ_USERS_USER_AUTH_HEADER, + 400, + String.format("{\"sort\":[\"%s\"]}", invalidSortName), + String.format("sorting is not supported for field [%s] in User query", invalidSortName) + ); + } + + private String getReservedUsernameAndAssertExists() throws IOException { + String username = randomFrom(reservedUsers); + final Request request = new Request("GET", "/_security/user"); + + if (randomBoolean()) { + // Update the user to create it in the security index + Request putUserRequest = new Request("PUT", "/_security/user/" + username); + putUserRequest.setJsonEntity("{\"enabled\": true}"); + } + + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_USERS_USER_AUTH_HEADER)); + final Response response = client().performRequest(request); + assertOK(response); + final Map responseMap = responseAsMap(response); + assertNotNull(responseMap.get(username)); + return username; + } + + @SuppressWarnings("unchecked") + private List extractSortValues(Map userInfo) { + return (List) userInfo.get("_sort"); + } + + private List> collectUsers(Request request, int total) throws IOException { + final Response response = client().performRequest(request); + assertOK(response); + final Map responseMap = responseAsMap(response); + @SuppressWarnings("unchecked") + final List> userInfos = (List>) responseMap.get("users"); + assertThat(responseMap.get("total"), equalTo(total)); + assertThat(responseMap.get("count"), equalTo(userInfos.size())); + return userInfos; + } + + private void assertQueryError(int statusCode, String body) { + assertQueryError(READ_USERS_USER_AUTH_HEADER, statusCode, body); + } + + private void assertQueryError(String authHeader, int statusCode, String body) { + assertQueryError(authHeader, statusCode, body, null); + } + + private void assertQueryError(String authHeader, int statusCode, String body, String errorMessage) { + final Request request = new Request(randomFrom("GET", "POST"), "/_security/_query/user"); + request.setJsonEntity(body); + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, authHeader)); + final ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(responseException.getResponse().getStatusLine().getStatusCode(), 
equalTo(statusCode)); + if (errorMessage != null) { + assertTrue(responseException.getMessage().contains(errorMessage)); + } + } + + private void assertQuery(String body, Consumer>> userVerifier) throws IOException { + final Request request = queryUserRequestWithAuth(); + request.setJsonEntity(body); + final Response response = client().performRequest(request); + assertOK(response); + final Map responseMap = responseAsMap(response); + @SuppressWarnings("unchecked") + final List> users = (List>) responseMap.get("users"); + userVerifier.accept(users); + } + + private void assertUser(User expectedUser, Map actualUser) { + assertUser(userToMap(expectedUser), actualUser); + } + + @SuppressWarnings("unchecked") + private void assertUser(Map expectedUser, Map actualUser) { + assertEquals(expectedUser.get(User.Fields.USERNAME.getPreferredName()), actualUser.get(User.Fields.USERNAME.getPreferredName())); + assertArrayEquals( + ((List) expectedUser.get(User.Fields.ROLES.getPreferredName())).toArray(), + ((List) actualUser.get(User.Fields.ROLES.getPreferredName())).toArray() + ); + assertEquals(expectedUser.get(User.Fields.FULL_NAME.getPreferredName()), actualUser.get(User.Fields.FULL_NAME.getPreferredName())); + assertEquals(expectedUser.get(User.Fields.EMAIL.getPreferredName()), actualUser.get(User.Fields.EMAIL.getPreferredName())); + assertEquals(expectedUser.get(User.Fields.METADATA.getPreferredName()), actualUser.get(User.Fields.METADATA.getPreferredName())); + assertEquals(expectedUser.get(User.Fields.ENABLED.getPreferredName()), actualUser.get(User.Fields.ENABLED.getPreferredName())); + } + + private Map userToMap(User user) { + return Map.of( + User.Fields.USERNAME.getPreferredName(), + user.principal(), + User.Fields.ROLES.getPreferredName(), + Arrays.stream(user.roles()).toList(), + User.Fields.FULL_NAME.getPreferredName(), + user.fullName(), + User.Fields.EMAIL.getPreferredName(), + user.email(), + User.Fields.METADATA.getPreferredName(), + user.metadata(), + User.Fields.ENABLED.getPreferredName(), + user.enabled() + ); + } + + private void assertUsers(List expectedUsers, List> actualUsers, String sortField, int from) { + assertEquals(expectedUsers.size() - from, actualUsers.size()); + + List> sortedExpectedUsers = expectedUsers.stream() + .map(this::userToMap) + .sorted(Comparator.comparing(user -> user.get(sortField).toString())) + .toList(); + + for (int i = from; i < sortedExpectedUsers.size(); i++) { + assertUser(sortedExpectedUsers.get(i), actualUsers.get(i - from)); + } + } + + public static Map randomUserMetadata() { + return ESTestCase.randomFrom( + Map.of( + "employee_id", + ESTestCase.randomAlphaOfLength(5), + "number", + 1, + "numbers", + List.of(1, 3, 5), + "extra", + Map.of("favorite pizza", "margherita", "age", 42) + ), + Map.of(ESTestCase.randomAlphaOfLengthBetween(3, 8), ESTestCase.randomAlphaOfLengthBetween(3, 8)), + Map.of(), + null + ); + } + + private List createRandomUsers() throws IOException { + int randomUserCount = randomIntBetween(8, 15); + final List users = new ArrayList<>(randomUserCount); + + for (int i = 0; i < randomUserCount; i++) { + users.add( + createUser( + randomValueOtherThanMany(reservedUsers::contains, () -> randomAlphaOfLengthBetween(3, 8)) + "-" + i, + randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 8)), + randomAlphaOfLengthBetween(3, 8), + randomAlphaOfLengthBetween(3, 8), + randomUserMetadata(), + randomBoolean() + ) + ); + } + + return users; + } + + private User createUser(String userName, String[] roles) throws 
IOException { + return createUser( + userName, + roles, + randomAlphaOfLengthBetween(3, 8), + randomAlphaOfLengthBetween(3, 8), + randomUserMetadata(), + randomBoolean() + ); + } + + private User createUser(String userName, String[] roles, String fullName, String email, Map metadata, boolean enabled) + throws IOException { + + final Request request = new Request("POST", "/_security/user/" + userName); + BytesReference source = BytesReference.bytes( + jsonBuilder().map( + Map.of( + User.Fields.USERNAME.getPreferredName(), + userName, + User.Fields.ROLES.getPreferredName(), + roles, + User.Fields.FULL_NAME.getPreferredName(), + fullName, + User.Fields.EMAIL.getPreferredName(), + email, + User.Fields.METADATA.getPreferredName(), + metadata == null ? Map.of() : metadata, + User.Fields.PASSWORD.getPreferredName(), + "100%-security-guaranteed", + User.Fields.ENABLED.getPreferredName(), + enabled + ) + ) + ); + request.setJsonEntity(source.utf8ToString()); + Response response = adminClient().performRequest(request); + assertOK(response); + assertTrue((boolean) responseAsMap(response).get("created")); + return new User(userName, roles, fullName, email, metadata, enabled); + } +} diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java index 5843350e36457..587cc4643514c 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java @@ -22,6 +22,9 @@ public abstract class SecurityInBasicRestTestCase extends ESRestTestCase { protected static final String REST_USER = "security_test_user"; private static final SecureString REST_PASSWORD = new SecureString("security-test-password".toCharArray()); + protected static final String READ_USERS_USER = "read_users_user"; + private static final SecureString READ_USERS_PASSWORD = new SecureString("read-users-password".toCharArray()); + private static final String ADMIN_USER = "admin_user"; private static final SecureString ADMIN_PASSWORD = new SecureString("admin-password".toCharArray()); @@ -47,6 +50,7 @@ public abstract class SecurityInBasicRestTestCase extends ESRestTestCase { .user(REST_USER, REST_PASSWORD.toString(), "security_test_role", false) .user(API_KEY_USER, API_KEY_USER_PASSWORD.toString(), "api_key_user_role", false) .user(API_KEY_ADMIN_USER, API_KEY_ADMIN_USER_PASSWORD.toString(), "api_key_admin_role", false) + .user(READ_USERS_USER, READ_USERS_PASSWORD.toString(), "read_users_user_role", false) .build(); @Override diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/resources/roles.yml b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/resources/roles.yml index 47f1c05ffaaf8..15c291274bcdb 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/resources/roles.yml +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/resources/roles.yml @@ -18,6 +18,11 @@ api_key_user_role: cluster: - manage_own_api_key +# Used to perform query user operations +read_users_user_role: + cluster: + - read_security + # Role with remote indices privileges role_remote_indices: remote_indices: diff --git 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java index 1e1d8a7f0654c..3fbcd00690e82 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java @@ -9,9 +9,8 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.SecureString; @@ -377,14 +376,14 @@ private void prepareIndices() { assertCacheState(DLS_TEMPLATE_ROLE_QUERY_INDEX, 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - final ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge( + final BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge( DLS_INDEX, FLS_INDEX, INDEX, DLS_TEMPLATE_ROLE_QUERY_INDEX ).setFlush(true).get(); ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); - final RefreshResponse refreshResponse = indicesAdmin().prepareRefresh(DLS_INDEX, FLS_INDEX, INDEX, DLS_TEMPLATE_ROLE_QUERY_INDEX) + final BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh(DLS_INDEX, FLS_INDEX, INDEX, DLS_TEMPLATE_ROLE_QUERY_INDEX) .get(); assertThat(refreshResponse.getFailedShards(), equalTo(0)); ensureGreen(DLS_INDEX, FLS_INDEX, INDEX, DLS_TEMPLATE_ROLE_QUERY_INDEX); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java index e481cf70b9afe..79cf0cb9f7987 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java @@ -81,7 +81,6 @@ public void testFetchAllByEntityWithBrokenScroll() { request.scroll(TimeValue.timeValueHours(10L)); String scrollId = randomAlphaOfLength(5); - SearchHit[] hits = new SearchHit[] { new SearchHit(1), new SearchHit(2) }; Answer returnResponse = invocation -> { @SuppressWarnings("unchecked") @@ -89,7 +88,11 @@ public void testFetchAllByEntityWithBrokenScroll() { ActionListener.respondAndRelease( listener, new SearchResponse( - new SearchHits(hits, new TotalHits(3, TotalHits.Relation.EQUAL_TO), 1), + SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(1), SearchHit.unpooled(2) }, + new TotalHits(3, TotalHits.Relation.EQUAL_TO), + 1 + ), null, null, false, diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 1329158f57d4d..a693c192f5fd2 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -15,10 +15,10 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.TransportGetAction; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -810,7 +810,7 @@ private void doTestDeletionBehaviorWhenKeysBecomeInvalidBeforeAndAfterRetentionP private void refreshSecurityIndex() throws Exception { assertBusy(() -> { - final RefreshResponse refreshResponse = indicesAdmin().prepareRefresh(SECURITY_MAIN_ALIAS).get(); + final BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh(SECURITY_MAIN_ALIAS).get(); assertThat(refreshResponse.getFailedShards(), is(0)); }); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index c6b441d9cc04f..b6c6ea60d869f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -260,6 +260,7 @@ import org.elasticsearch.xpack.security.action.user.TransportGetUsersAction; import org.elasticsearch.xpack.security.action.user.TransportHasPrivilegesAction; import org.elasticsearch.xpack.security.action.user.TransportPutUserAction; +import org.elasticsearch.xpack.security.action.user.TransportQueryUserAction; import org.elasticsearch.xpack.security.action.user.TransportSetEnabledAction; import org.elasticsearch.xpack.security.audit.AuditTrail; import org.elasticsearch.xpack.security.audit.AuditTrailService; @@ -365,6 +366,7 @@ import org.elasticsearch.xpack.security.rest.action.user.RestHasPrivilegesAction; import org.elasticsearch.xpack.security.rest.action.user.RestProfileHasPrivilegesAction; import org.elasticsearch.xpack.security.rest.action.user.RestPutUserAction; +import org.elasticsearch.xpack.security.rest.action.user.RestQueryUserAction; import org.elasticsearch.xpack.security.rest.action.user.RestSetEnabledAction; import org.elasticsearch.xpack.security.support.CacheInvalidatorRegistry; import org.elasticsearch.xpack.security.support.ExtensionComponents; @@ -1315,6 +1317,7 @@ public void onIndexModule(IndexModule module) { new ActionHandler<>(ClearPrivilegesCacheAction.INSTANCE, TransportClearPrivilegesCacheAction.class), new ActionHandler<>(ClearSecurityCacheAction.INSTANCE, TransportClearSecurityCacheAction.class), new ActionHandler<>(GetUsersAction.INSTANCE, TransportGetUsersAction.class), + new ActionHandler<>(ActionTypes.QUERY_USER_ACTION, TransportQueryUserAction.class), new ActionHandler<>(PutUserAction.INSTANCE, TransportPutUserAction.class), new ActionHandler<>(DeleteUserAction.INSTANCE, TransportDeleteUserAction.class), new ActionHandler<>(GetRolesAction.INSTANCE, TransportGetRolesAction.class), @@ -1406,6 +1409,7 @@ 
public List<RestHandler> getRestHandlers( new RestClearApiKeyCacheAction(settings, getLicenseState()), new RestClearServiceAccountTokenStoreCacheAction(settings, getLicenseState()), new RestGetUsersAction(settings, getLicenseState()), + new RestQueryUserAction(settings, getLicenseState()), new RestPutUserAction(settings, getLicenseState()), new RestDeleteUserAction(settings, getLicenseState()), new RestGetRolesAction(settings, getLicenseState()), diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserAction.java new file mode 100644 index 0000000000000..2a9aef73ff62a --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserAction.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.ActionTypes; +import org.elasticsearch.xpack.core.security.action.user.QueryUserRequest; +import org.elasticsearch.xpack.core.security.action.user.QueryUserResponse; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; +import org.elasticsearch.xpack.security.support.UserBoolQueryBuilder; + +import java.util.List; +import java.util.Locale; +import java.util.Set; + +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; +import static org.elasticsearch.xpack.security.support.UserBoolQueryBuilder.USER_FIELD_NAME_TRANSLATOR; + +public final class TransportQueryUserAction extends TransportAction<QueryUserRequest, QueryUserResponse> { + private final NativeUsersStore usersStore; + private static final Set<String> FIELD_NAMES_WITH_SORT_SUPPORT = Set.of("username", "roles", "enabled"); + + @Inject + public TransportQueryUserAction(TransportService transportService, ActionFilters actionFilters, NativeUsersStore usersStore) { + super(ActionTypes.QUERY_USER_ACTION.name(), actionFilters, transportService.getTaskManager()); + this.usersStore = usersStore; + } + + @Override + protected void doExecute(Task task, QueryUserRequest request, ActionListener<QueryUserResponse> listener) { + final SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.searchSource() + .version(false) + .fetchSource(true) + .trackTotalHits(true); + + if (request.getFrom() != null) { + searchSourceBuilder.from(request.getFrom()); + } + if (request.getSize() != null) { + searchSourceBuilder.size(request.getSize()); + } + + searchSourceBuilder.query(UserBoolQueryBuilder.build(request.getQueryBuilder())); + + if (request.getFieldSortBuilders() != null) { + translateFieldSortBuilders(request.getFieldSortBuilders(), searchSourceBuilder); + } + + if (request.getSearchAfterBuilder() != null) {
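+ // search_after resumes pagination from the sort values of the last hit on the previous page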
searchSourceBuilder.searchAfter(request.getSearchAfterBuilder().getSortValues()); + } + + final SearchRequest searchRequest = new SearchRequest(new String[] { SECURITY_MAIN_ALIAS }, searchSourceBuilder); + usersStore.queryUsers(searchRequest, listener); + } + + // package private for testing + static void translateFieldSortBuilders(List<FieldSortBuilder> fieldSortBuilders, SearchSourceBuilder searchSourceBuilder) { + fieldSortBuilders.forEach(fieldSortBuilder -> { + if (fieldSortBuilder.getNestedSort() != null) { + throw new IllegalArgumentException("nested sorting is not supported for User query"); + } + if (FieldSortBuilder.DOC_FIELD_NAME.equals(fieldSortBuilder.getFieldName())) { + searchSourceBuilder.sort(fieldSortBuilder); + } else { + final String translatedFieldName = USER_FIELD_NAME_TRANSLATOR.translate(fieldSortBuilder.getFieldName()); + if (FIELD_NAMES_WITH_SORT_SUPPORT.contains(translatedFieldName) == false) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "sorting is not supported for field [%s] in User query", fieldSortBuilder.getFieldName()) + ); + } + + if (translatedFieldName.equals(fieldSortBuilder.getFieldName())) { + searchSourceBuilder.sort(fieldSortBuilder); + } else { + final FieldSortBuilder translatedFieldSortBuilder = new FieldSortBuilder(translatedFieldName).order( + fieldSortBuilder.order() + ) + .missing(fieldSortBuilder.missing()) + .unmappedType(fieldSortBuilder.unmappedType()) + .setFormat(fieldSortBuilder.getFormat()); + + if (fieldSortBuilder.sortMode() != null) { + translatedFieldSortBuilder.sortMode(fieldSortBuilder.sortMode()); + } + if (fieldSortBuilder.getNumericType() != null) { + translatedFieldSortBuilder.setNumericType(fieldSortBuilder.getNumericType()); + } + searchSourceBuilder.sort(translatedFieldSortBuilder); + } + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 5a8b228a1145c..26f6268aaa5dc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -210,7 +210,7 @@ public class TokenService { static final TransportVersion VERSION_ACCESS_TOKENS_AS_UUIDS = TransportVersions.V_7_2_0; static final TransportVersion VERSION_MULTIPLE_CONCURRENT_REFRESHES = TransportVersions.V_7_2_0; static final TransportVersion VERSION_CLIENT_AUTH_FOR_REFRESH = TransportVersions.V_8_2_0; - static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersions.V_8_500_061; + static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersions.V_8_10_X; private static final Logger logger = LogManager.getLogger(TokenService.class); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java index 36f78682b6bd1..81aa487f73e2c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import
org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; @@ -43,6 +44,7 @@ import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequest; import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequest; import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; +import org.elasticsearch.xpack.core.security.action.user.QueryUserResponse; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; import org.elasticsearch.xpack.core.security.authc.support.Hasher; @@ -57,6 +59,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.function.Consumer; import java.util.function.Supplier; @@ -161,6 +164,40 @@ public void getUsers(String[] userNames, final ActionListener<Collection<User>> listener) { } } + public void queryUsers(SearchRequest searchRequest, ActionListener<QueryUserResponse> listener) { + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); + if (frozenSecurityIndex.indexExists() == false) { + logger.debug("security index does not exist"); + listener.onResponse(QueryUserResponse.emptyResponse()); + } else if (frozenSecurityIndex.isAvailable(SEARCH_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); + } else { + securityIndex.checkIndexVersionThenExecute( + listener::onFailure, + () -> executeAsyncWithOrigin( + client, + SECURITY_ORIGIN, + TransportSearchAction.TYPE, + searchRequest, + ActionListener.wrap(searchResponse -> { + final long total = searchResponse.getHits().getTotalHits().value; + if (total == 0) { + logger.debug("No users found for query [{}]", searchRequest.source().query()); + listener.onResponse(QueryUserResponse.emptyResponse()); + return; + } + + final List<QueryUserResponse.Item> userItem = Arrays.stream(searchResponse.getHits().getHits()).map(hit -> { + UserAndPassword userAndPassword = transformUser(hit.getId(), hit.getSourceAsMap()); + return userAndPassword != null ? new QueryUserResponse.Item(userAndPassword.user(), hit.getSortValues()) : null; + }).filter(Objects::nonNull).toList(); + listener.onResponse(new QueryUserResponse(total, userItem)); + }, listener::onFailure) + ) + ); + } + } + void getUserCount(final ActionListener<Long> listener) { final SecurityIndexManager frozenSecurityIndex = this.securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserAction.java new file mode 100644 index 0000000000000..407fe36fa82d3 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserAction.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.security.rest.action.user; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.action.ActionTypes; +import org.elasticsearch.xpack.core.security.action.user.QueryUserRequest; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery; +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Rest action to search for Users + */ +public final class RestQueryUserAction extends SecurityBaseRestHandler { + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<Payload, Void> PARSER = new ConstructingObjectParser<>( + "query_user_request_payload", + a -> new Payload((QueryBuilder) a[0], (Integer) a[1], (Integer) a[2], (List<FieldSortBuilder>) a[3], (SearchAfterBuilder) a[4]) + ); + + static { + PARSER.declareObject(optionalConstructorArg(), (p, c) -> parseTopLevelQuery(p), new ParseField("query")); + PARSER.declareInt(optionalConstructorArg(), new ParseField("from")); + PARSER.declareInt(optionalConstructorArg(), new ParseField("size")); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return new FieldSortBuilder(p.text()); + } else if (p.currentToken() == XContentParser.Token.START_OBJECT) { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, p.nextToken(), p); + final FieldSortBuilder fieldSortBuilder = FieldSortBuilder.fromXContent(p, p.currentName()); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, p.nextToken(), p); + return fieldSortBuilder; + } else { + throw new IllegalArgumentException("mal-formatted sort object"); + } + }, new ParseField("sort")); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> SearchAfterBuilder.fromXContent(p), + new ParseField("search_after"), + ObjectParser.ValueType.VALUE_ARRAY + ); + } + + /** + * @param settings the node's settings + * @param licenseState the license state that will be used to determine if + * security is licensed + */ + public RestQueryUserAction(Settings settings, XPackLicenseState licenseState) { + super(settings, licenseState); + } + + @Override + public List<Route> routes() { + return List.of(new Route(GET, "/_security/_query/user"), new Route(POST, "/_security/_query/user")); + } + + @Override + public String getName() { + return "xpack_security_query_user"; + } + + @Override + protected RestChannelConsumer innerPrepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final
QueryUserRequest queryUserRequest; + if (request.hasContentOrSourceParam()) { + final Payload payload = PARSER.parse(request.contentOrSourceParamParser(), null); + queryUserRequest = new QueryUserRequest( + payload.queryBuilder, + payload.from, + payload.size, + payload.fieldSortBuilders, + payload.searchAfterBuilder + ); + } else { + queryUserRequest = new QueryUserRequest(null, null, null, null, null); + } + return channel -> client.execute(ActionTypes.QUERY_USER_ACTION, queryUserRequest, new RestToXContentListener<>(channel)); + } + + private record Payload( + @Nullable QueryBuilder queryBuilder, + @Nullable Integer from, + @Nullable Integer size, + @Nullable List<FieldSortBuilder> fieldSortBuilders, + @Nullable SearchAfterBuilder searchAfterBuilder + ) {} +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexFieldNameTranslator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexFieldNameTranslator.java new file mode 100644 index 0000000000000..291d55b7b0837 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexFieldNameTranslator.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.support; + +import java.util.List; +import java.util.function.Function; +import java.util.function.Predicate; + +public class SecurityIndexFieldNameTranslator { + + private final List<FieldName> fieldNameTranslators; + + public SecurityIndexFieldNameTranslator(List<FieldName> fieldNameTranslators) { + this.fieldNameTranslators = fieldNameTranslators; + } + + public String translate(String queryFieldName) { + for (FieldName fieldName : this.fieldNameTranslators) { + if (fieldName.supportsQueryName(queryFieldName)) { + return fieldName.indexFieldName(queryFieldName); + } + } + throw new IllegalArgumentException("Field [" + queryFieldName + "] is not allowed"); + } + + public boolean supportedIndexFieldName(String indexFieldName) { + for (FieldName fieldName : this.fieldNameTranslators) { + if (fieldName.supportsIndexName(indexFieldName)) { + return true; + } + } + return false; + } + + public static FieldName exact(String name) { + return exact(name, Function.identity()); + } + + public static FieldName exact(String name, Function<String, String> translation) { + return new SecurityIndexFieldNameTranslator.ExactFieldName(name, translation); + } + + public abstract static class FieldName { + private final Function<String, String> toIndexFieldName; + protected final Predicate<String> validIndexNamePredicate; + + FieldName(Function<String, String> toIndexFieldName, Predicate<String> validIndexNamePredicate) { + this.toIndexFieldName = toIndexFieldName; + this.validIndexNamePredicate = validIndexNamePredicate; + } + + public abstract boolean supportsQueryName(String queryFieldName); + + public abstract boolean supportsIndexName(String indexFieldName); + + public String indexFieldName(String queryFieldName) { + return toIndexFieldName.apply(queryFieldName); + } + } + + private static class ExactFieldName extends FieldName { + private final String name; + + private ExactFieldName(String name, Function<String, String> toIndexFieldName) { + super(toIndexFieldName, fieldName -> toIndexFieldName.apply(name).equals(fieldName)); + this.name = name; + } + + @Override + public boolean supportsQueryName(String
queryFieldName) { + return queryFieldName.equals(name); + } + + @Override + public boolean supportsIndexName(String indexFieldName) { + return validIndexNamePredicate.test(indexFieldName); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilder.java new file mode 100644 index 0000000000000..5d3824ab1f8ce --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilder.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.support; + +import org.apache.lucene.search.Query; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.ExistsQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.PrefixQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.index.query.WildcardQueryBuilder; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.security.support.SecurityIndexFieldNameTranslator.exact; + +public class UserBoolQueryBuilder extends BoolQueryBuilder { + public static final SecurityIndexFieldNameTranslator USER_FIELD_NAME_TRANSLATOR = new SecurityIndexFieldNameTranslator( + List.of(exact("username"), exact("roles"), exact("full_name"), exact("email"), exact("enabled")) + ); + + private UserBoolQueryBuilder() {} + + public static UserBoolQueryBuilder build(QueryBuilder queryBuilder) { + UserBoolQueryBuilder userQueryBuilder = new UserBoolQueryBuilder(); + if (queryBuilder != null) { + QueryBuilder translatedQueryBuilder = translateToUserQueryBuilder(queryBuilder); + userQueryBuilder.must(translatedQueryBuilder); + } + userQueryBuilder.filter(QueryBuilders.termQuery("type", "user")); + + return userQueryBuilder; + } + + private static QueryBuilder translateToUserQueryBuilder(QueryBuilder qb) { + if (qb instanceof final BoolQueryBuilder query) { + final BoolQueryBuilder newQuery = QueryBuilders.boolQuery() + .minimumShouldMatch(query.minimumShouldMatch()) + .adjustPureNegative(query.adjustPureNegative()); + query.must().stream().map(UserBoolQueryBuilder::translateToUserQueryBuilder).forEach(newQuery::must); + query.should().stream().map(UserBoolQueryBuilder::translateToUserQueryBuilder).forEach(newQuery::should); + query.mustNot().stream().map(UserBoolQueryBuilder::translateToUserQueryBuilder).forEach(newQuery::mustNot); + query.filter().stream().map(UserBoolQueryBuilder::translateToUserQueryBuilder).forEach(newQuery::filter); + return newQuery; + } else if (qb instanceof MatchAllQueryBuilder) { + return qb; + } else if (qb instanceof final TermQueryBuilder query) { + final String translatedFieldName = USER_FIELD_NAME_TRANSLATOR.translate(query.fieldName()); + return QueryBuilders.termQuery(translatedFieldName,
query.value()).caseInsensitive(query.caseInsensitive()); + } else if (qb instanceof final ExistsQueryBuilder query) { + final String translatedFieldName = USER_FIELD_NAME_TRANSLATOR.translate(query.fieldName()); + return QueryBuilders.existsQuery(translatedFieldName); + } else if (qb instanceof final TermsQueryBuilder query) { + if (query.termsLookup() != null) { + throw new IllegalArgumentException("Terms query with terms lookup is not supported for User query"); + } + final String translatedFieldName = USER_FIELD_NAME_TRANSLATOR.translate(query.fieldName()); + return QueryBuilders.termsQuery(translatedFieldName, query.getValues()); + } else if (qb instanceof final PrefixQueryBuilder query) { + final String translatedFieldName = USER_FIELD_NAME_TRANSLATOR.translate(query.fieldName()); + return QueryBuilders.prefixQuery(translatedFieldName, query.value()).caseInsensitive(query.caseInsensitive()); + } else if (qb instanceof final WildcardQueryBuilder query) { + final String translatedFieldName = USER_FIELD_NAME_TRANSLATOR.translate(query.fieldName()); + return QueryBuilders.wildcardQuery(translatedFieldName, query.value()) + .caseInsensitive(query.caseInsensitive()) + .rewrite(query.rewrite()); + } else { + throw new IllegalArgumentException("Query type [" + qb.getName() + "] is not supported for User query"); + } + } + + @Override + protected Query doToQuery(SearchExecutionContext context) throws IOException { + context.setAllowedFields(this::isIndexFieldNameAllowed); + return super.doToQuery(context); + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + if (queryRewriteContext instanceof SearchExecutionContext) { + ((SearchExecutionContext) queryRewriteContext).setAllowedFields(this::isIndexFieldNameAllowed); + } + return super.doRewrite(queryRewriteContext); + } + + boolean isIndexFieldNameAllowed(String queryFieldName) { + // Type is needed to filter on user doc type + return queryFieldName.equals("type") || USER_FIELD_NAME_TRANSLATOR.supportedIndexFieldName(queryFieldName); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index a088e6c61822a..4127b8cdad32b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; @@ -34,6 +33,7 @@ import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.internal.Client; @@ -197,25 +197,30 @@ 
protected void SearchRequest searchRequest = (SearchRequest) request; searchRequests.add(searchRequest); final SearchHit[] hits = searchFunction.apply(searchRequest); - ActionListener.respondAndRelease( - listener, - (Response) new SearchResponse( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1, - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + final var searchHits = new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f); + try { + ActionListener.respondAndRelease( + listener, + (Response) new SearchResponse( + searchHits, + null, + null, + false, + false, + null, + 1, + "_scrollId1", + 1, + 1, + 0, + 1, + null, + null + ) + ); + } finally { + searchHits.decRef(); + } } else if (TransportSearchScrollAction.TYPE.name().equals(action.name())) { assertThat(request, instanceOf(SearchScrollRequest.class)); ActionListener.respondAndRelease( @@ -245,7 +250,7 @@ protected void listener.onResponse((Response) response); } else if (RefreshAction.NAME.equals(action.name())) { assertThat(request, instanceOf(RefreshRequest.class)); - listener.onResponse((Response) mock(RefreshResponse.class)); + listener.onResponse((Response) mock(BroadcastResponse.class)); } else { super.doExecute(action, request, listener); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserActionTests.java new file mode 100644 index 0000000000000..aa5f935998757 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserActionTests.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.search.sort.SortMode; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESTestCase; + +import java.util.List; +import java.util.Locale; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class TransportQueryUserActionTests extends ESTestCase { + private static final String[] allowedIndexFieldNames = new String[] { "username", "roles", "enabled" }; + + public void testTranslateFieldSortBuilders() { + final List<String> fieldNames = List.of(allowedIndexFieldNames); + + final List<FieldSortBuilder> originals = fieldNames.stream().map(this::randomFieldSortBuilderWithName).toList(); + + final SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.searchSource(); + TransportQueryUserAction.translateFieldSortBuilders(originals, searchSourceBuilder); + + IntStream.range(0, originals.size()).forEach(i -> { + final FieldSortBuilder original = originals.get(i); + final FieldSortBuilder translated = (FieldSortBuilder) searchSourceBuilder.sorts().get(i); + assertThat(original.getFieldName(), equalTo(translated.getFieldName())); + + assertThat(translated.order(), equalTo(original.order())); + assertThat(translated.missing(), equalTo(original.missing())); + assertThat(translated.unmappedType(), equalTo(original.unmappedType())); + assertThat(translated.getNumericType(), equalTo(original.getNumericType())); + assertThat(translated.getFormat(), equalTo(original.getFormat())); + assertThat(translated.sortMode(), equalTo(original.sortMode())); + }); + } + + public void testNestedSortingIsNotAllowed() { + final FieldSortBuilder fieldSortBuilder = new FieldSortBuilder("roles"); + fieldSortBuilder.setNestedSort(new NestedSortBuilder("something")); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> TransportQueryUserAction.translateFieldSortBuilders(List.of(fieldSortBuilder), SearchSourceBuilder.searchSource()) + ); + assertThat(e.getMessage(), equalTo("nested sorting is not supported for User query")); + } + + public void testNestedSortingOnTextFieldsNotAllowed() { + String fieldName = randomFrom("full_name", "email"); + final List<String> fieldNames = List.of(fieldName); + final List<FieldSortBuilder> originals = fieldNames.stream().map(this::randomFieldSortBuilderWithName).toList(); + final SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.searchSource(); + + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> TransportQueryUserAction.translateFieldSortBuilders(originals, searchSourceBuilder) + ); + assertThat(e.getMessage(), equalTo(String.format(Locale.ROOT, "sorting is not supported for field [%s] in User query", fieldName))); + } + + private FieldSortBuilder randomFieldSortBuilderWithName(String name) { + final FieldSortBuilder fieldSortBuilder = new FieldSortBuilder(name); + fieldSortBuilder.order(randomBoolean() ? SortOrder.ASC : SortOrder.DESC); + fieldSortBuilder.setFormat(randomBoolean() ?
randomAlphaOfLengthBetween(3, 16) : null); + if (randomBoolean()) { + fieldSortBuilder.setNumericType(randomFrom("long", "double", "date", "date_nanos")); + } + if (randomBoolean()) { + fieldSortBuilder.missing(randomAlphaOfLengthBetween(3, 8)); + } + if (randomBoolean()) { + fieldSortBuilder.sortMode(randomFrom(SortMode.values())); + } + return fieldSortBuilder; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index b921fef9fd917..ac11dee8d4a48 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -405,7 +405,7 @@ public void testInvalidateApiKeysWillSetInvalidatedFlagAndRecordTimestamp() { when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn(new SearchRequestBuilder(client)); doAnswer(invocation -> { final var listener = (ActionListener) invocation.getArguments()[1]; - final var searchHit = new SearchHit(docId, apiKeyId); + final var searchHit = SearchHit.unpooled(docId, apiKeyId); try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.map(buildApiKeySourceDoc("some_hash".toCharArray())); searchHit.sourceRef(BytesReference.bytes(builder)); @@ -413,7 +413,7 @@ public void testInvalidateApiKeysWillSetInvalidatedFlagAndRecordTimestamp() { ActionListener.respondAndRelease( listener, new SearchResponse( - new SearchHits( + SearchHits.unpooled( new SearchHit[] { searchHit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), randomFloat(), @@ -758,7 +758,7 @@ public void testCrossClusterApiKeyUsageStats() { ActionListener.respondAndRelease( listener, new SearchResponse( - new SearchHits( + SearchHits.unpooled( searchHits.toArray(SearchHit[]::new), new TotalHits(searchHits.size(), TotalHits.Relation.EQUAL_TO), randomFloat(), @@ -825,7 +825,7 @@ private SearchHit searchHitForCrossClusterApiKey(int crossClusterAccessLevel) { }; final int docId = randomIntBetween(0, Integer.MAX_VALUE); final String apiKeyId = randomAlphaOfLength(20); - final var searchHit = new SearchHit(docId, apiKeyId); + final var searchHit = SearchHit.unpooled(docId, apiKeyId); try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.map(XContentHelper.convertToMap(JsonXContent.jsonXContent, Strings.format(""" { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index e9a252553fe8d..3c6f7462c0bb4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -265,6 +265,7 @@ public void init() throws Exception { client = mock(Client.class); threadPool = new ThreadPool( settings, + MeterRegistry.NOOP, new FixedExecutorBuilder( settings, THREAD_POOL_NAME, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 2f646631d14cd..adf0b44266260 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java 
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -62,6 +62,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; @@ -254,7 +255,7 @@ private static DiscoveryNode addAnotherPre8500DataNode(ClusterService clusterSer transportVersion = TransportVersions.V_8_8_1; } else { version = Version.V_8_9_0; - transportVersion = TransportVersions.V_8_500_020; + transportVersion = TransportVersions.V_8_9_X; } return addAnotherDataNodeWithVersion(clusterService, version, transportVersion); } @@ -269,6 +270,7 @@ public void tearDown() throws Exception { public static void startThreadPool() throws IOException { threadPool = new ThreadPool( settings, + MeterRegistry.NOOP, new FixedExecutorBuilder( settings, TokenService.THREAD_POOL_NAME, @@ -1235,9 +1237,9 @@ private void mockTokenForRefreshToken( assertThat(refreshFilter.fieldName(), is("refresh_token.token")); final SearchHits hits; if (storedRefreshToken.equals(refreshFilter.value())) { - SearchHit hit = new SearchHit(randomInt(), "token_" + userToken.getId()); + SearchHit hit = SearchHit.unpooled(randomInt(), "token_" + userToken.getId()); hit.sourceRef(docSource); - hits = new SearchHits(new SearchHit[] { hit }, null, 1); + hits = SearchHits.unpooled(new SearchHit[] { hit }, null, 1); } else { hits = SearchHits.EMPTY_WITH_TOTAL_HITS; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java index 3a9fee4288bf2..33d3e6783b9e6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java @@ -263,7 +263,7 @@ public void testFindTokensFor() { if (r instanceof SearchRequest) { final SearchHit[] hits = IntStream.range(0, nhits) .mapToObj( - i -> new SearchHit( + i -> SearchHit.unpooled( randomIntBetween(0, Integer.MAX_VALUE), SERVICE_ACCOUNT_TOKEN_DOC_TYPE + "-" + accountId.asPrincipal() + "/" + tokenNames[i] ) @@ -272,7 +272,7 @@ public void testFindTokensFor() { ActionListener.respondAndRelease( l, new SearchResponse( - new SearchHits(hits, new TotalHits(nhits, TotalHits.Relation.EQUAL_TO), randomFloat(), null, null, null), + SearchHits.unpooled(hits, new TotalHits(nhits, TotalHits.Relation.EQUAL_TO), randomFloat(), null, null, null), null, null, false, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 169275ccc3ee3..a0008ba632151 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -346,7 +346,7 @@ private void doAnswerWithSearchResult(Client client, ExpressionRoleMapping mappi 
doAnswer(invocation -> { @SuppressWarnings("unchecked") final var listener = (ActionListener) invocation.getArguments()[1]; - final var searchHit = new SearchHit( + final var searchHit = SearchHit.unpooled( randomIntBetween(0, Integer.MAX_VALUE), NativeRoleMappingStore.getIdForName(mapping.getName()) ); @@ -357,14 +357,7 @@ private void doAnswerWithSearchResult(Client client, ExpressionRoleMapping mappi ActionListener.respondAndRelease( listener, new SearchResponse( - new SearchHits( - new SearchHit[] { searchHit }, - new TotalHits(1, TotalHits.Relation.EQUAL_TO), - randomFloat(), - null, - null, - null - ), + SearchHits.unpooled(new SearchHit[] { searchHit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), randomFloat()), null, null, false, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index 0c2f9cefbcffb..ed1b5e6c7668b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -818,22 +818,12 @@ private SearchHit[] buildHits(List sourcePrivile } private static SearchResponse buildSearchResponse(SearchHit[] hits) { - return new SearchResponse( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1, - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ); + var searchHits = new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f); + try { + return new SearchResponse(searchHits.asUnpooled(), null, null, false, false, null, 1, "_scrollId1", 1, 1, 0, 1, null, null); + } finally { + searchHits.decRef(); + } } private void handleBulkRequest(int expectedCount, Predicate> isCreated) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java index 2abbb6a610170..3a4e5a404eace 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.http.HttpInfo; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.node.Node; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -84,6 +85,7 @@ public static void startThreadPool() throws IOException { final Settings settings = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "InternalEnrollmentTokenGeneratorTests").build(); threadPool = new ThreadPool( settings, + MeterRegistry.NOOP, new FixedExecutorBuilder( settings, TokenService.THREAD_POOL_NAME, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java index 791aba46c92ea..0ab9533e62d4c 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -54,7 +55,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index a1f696cc5dddd..2ee42b360f02a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -60,7 +61,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java index 3c0e24da32763..8bbd051c2fc32 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -53,7 +54,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java index 67d2ab006eb22..4f14d8414ebca 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.searchafter.SearchAfterBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -58,7 +59,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java new file mode 100644 index 0000000000000..4a593eeb24ac6 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.rest.action.user; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.PrefixQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.AbstractRestChannel; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.action.user.QueryUserRequest; +import org.elasticsearch.xpack.core.security.action.user.QueryUserResponse; + +import java.util.List; + +import static org.hamcrest.collection.IsCollectionWithSize.hasSize; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.mockito.Mockito.mock; + +public class RestQueryUserActionTests extends ESTestCase { + + private final XPackLicenseState mockLicenseState = mock(XPackLicenseState.class); + + @Override + protected NamedXContentRegistry xContentRegistry() { + final SearchModule searchModule = new SearchModule(Settings.EMPTY, List.of()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + + public void testQueryParsing() throws Exception { + final String query1 = """ + { + "query": { + "bool": { + "must": [ + { + "terms": { + "username": [ "bart", "homer" ] + } + } + ], + "should": [ { "prefix": { "username": "ba" } } ] + } + } + }"""; + final FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray(query1), + XContentType.JSON + ).build(); + + final SetOnce<RestResponse> responseSetOnce = new SetOnce<>(); + final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + @Override + public void sendResponse(RestResponse restResponse) { + responseSetOnce.set(restResponse); + } + }; + + try (var threadPool = createThreadPool()) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { + @SuppressWarnings("unchecked") + @Override + public <Request extends ActionRequest, Response extends ActionResponse> void doExecute( + ActionType<Response> action, + Request request, + ActionListener<Response> listener + ) { + QueryUserRequest queryUserRequest = (QueryUserRequest) request; + final QueryBuilder queryBuilder = queryUserRequest.getQueryBuilder(); + assertNotNull(queryBuilder); + assertThat(queryBuilder.getClass(), is(BoolQueryBuilder.class)); + final BoolQueryBuilder boolQueryBuilder = (BoolQueryBuilder) queryBuilder; + assertTrue(boolQueryBuilder.filter().isEmpty()); + assertTrue(boolQueryBuilder.mustNot().isEmpty()); + assertThat(boolQueryBuilder.must(), hasSize(1)); + final QueryBuilder mustQueryBuilder = boolQueryBuilder.must().get(0);
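+ // Field names are translated later, in UserBoolQueryBuilder, so the parsed clauses should reach the transport request unchanged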
assertThat(mustQueryBuilder.getClass(), is(TermsQueryBuilder.class)); + assertThat(((TermsQueryBuilder) mustQueryBuilder).fieldName(), equalTo("username")); + assertThat(boolQueryBuilder.should(), hasSize(1)); + final QueryBuilder shouldQueryBuilder = boolQueryBuilder.should().get(0); + assertThat(shouldQueryBuilder.getClass(), is(PrefixQueryBuilder.class)); + assertThat(((PrefixQueryBuilder) shouldQueryBuilder).fieldName(), equalTo("username")); + listener.onResponse((Response) new QueryUserResponse(0, List.of())); + } + }; + final RestQueryUserAction restQueryUserAction = new RestQueryUserAction(Settings.EMPTY, mockLicenseState); + restQueryUserAction.handleRequest(restRequest, restChannel, client); + } + + assertNotNull(responseSetOnce.get()); + } + + public void testParsingSearchParameters() throws Exception { + final String requestBody = """ + { + "query": { + "match_all": {} + }, + "from": 42, + "size": 20, + "sort": [ "username", "full_name"], + "search_after": [ "bart" ] + }"""; + + final FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray(requestBody), + XContentType.JSON + ).build(); + + final SetOnce<RestResponse> responseSetOnce = new SetOnce<>(); + final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + @Override + public void sendResponse(RestResponse restResponse) { + responseSetOnce.set(restResponse); + } + }; + + try (var threadPool = createThreadPool()) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { + @SuppressWarnings("unchecked") + @Override + public <Request extends ActionRequest, Response extends ActionResponse> void doExecute( + ActionType<Response> action, + Request request, + ActionListener<Response> listener + ) { + QueryUserRequest queryUserRequest = (QueryUserRequest) request; + final QueryBuilder queryBuilder = queryUserRequest.getQueryBuilder(); + assertNotNull(queryBuilder); + assertThat(queryBuilder.getClass(), is(MatchAllQueryBuilder.class)); + assertThat(queryUserRequest.getFrom(), equalTo(42)); + assertThat(queryUserRequest.getSize(), equalTo(20)); + final List<FieldSortBuilder> fieldSortBuilders = queryUserRequest.getFieldSortBuilders(); + assertThat(fieldSortBuilders, hasSize(2)); + + assertThat(fieldSortBuilders.get(0), equalTo(new FieldSortBuilder("username"))); + assertThat(fieldSortBuilders.get(1), equalTo(new FieldSortBuilder("full_name"))); + + final SearchAfterBuilder searchAfterBuilder = queryUserRequest.getSearchAfterBuilder(); + assertThat(searchAfterBuilder, equalTo(new SearchAfterBuilder().setSortValues(new String[] { "bart" }))); + + listener.onResponse((Response) new QueryUserResponse(0, List.of())); + } + }; + + final RestQueryUserAction queryUserAction = new RestQueryUserAction(Settings.EMPTY, mockLicenseState); + queryUserAction.handleRequest(restRequest, restChannel, client); + } + assertNotNull(responseSetOnce.get()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilderTests.java new file mode 100644 index 0000000000000..460980d318786 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilderTests.java @@ -0,0 +1,221 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.security.support; + +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.DistanceFeatureQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MultiTermQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.query.SpanQueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.index.query.WildcardQueryBuilder; +import org.elasticsearch.indices.TermsLookup; +import org.elasticsearch.script.Script; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.List; +import java.util.function.Predicate; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.collection.IsCollectionWithSize.hasSize; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.hamcrest.core.IsIterableContaining.hasItem; +import static org.hamcrest.core.StringContains.containsString; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class UserBoolQueryBuilderTests extends ESTestCase { + private static final String[] allowedIndexFieldNames = new String[] { "username", "roles", "full_name", "email", "enabled" }; + + public void testBuildFromSimpleQuery() { + final QueryBuilder query = randomSimpleQuery(); + final UserBoolQueryBuilder userQueryBuilder = UserBoolQueryBuilder.build(query); + assertCommonFilterQueries(userQueryBuilder); + final List<QueryBuilder> mustQueries = userQueryBuilder.must(); + assertThat(mustQueries, hasSize(1)); + assertThat(mustQueries.get(0), equalTo(query)); + assertTrue(userQueryBuilder.should().isEmpty()); + assertTrue(userQueryBuilder.mustNot().isEmpty()); + } + + public void testBuildFromBoolQuery() { + final BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery(); + + if (randomBoolean()) { + boolQueryBuilder.must(QueryBuilders.prefixQuery(randomAllowedField(), "bar")); + } + if (randomBoolean()) { + boolQueryBuilder.should(QueryBuilders.wildcardQuery(randomAllowedField(), "*ar*")); + } + if (randomBoolean()) { + boolQueryBuilder.filter(QueryBuilders.termsQuery("roles", randomArray(3, 8, String[]::new, () -> "role-" + randomInt()))); + } + if (randomBoolean()) { + boolQueryBuilder.minimumShouldMatch(randomIntBetween(1, 2)); + } + final UserBoolQueryBuilder userBoolQueryBuilder = UserBoolQueryBuilder.build(boolQueryBuilder); + assertCommonFilterQueries(userBoolQueryBuilder); + + assertThat(userBoolQueryBuilder.must(), hasSize(1)); + assertThat(userBoolQueryBuilder.should(), empty()); + assertThat(userBoolQueryBuilder.mustNot(), empty()); + assertThat(userBoolQueryBuilder.filter(), hasItem(QueryBuilders.termQuery("type", "user"))); + assertThat(userBoolQueryBuilder.must().get(0).getClass(), is(BoolQueryBuilder.class)); + final BoolQueryBuilder translated = (BoolQueryBuilder) userBoolQueryBuilder.must().get(0); + assertThat(translated.must(), equalTo(boolQueryBuilder.must())); + assertThat(translated.should(), equalTo(boolQueryBuilder.should())); + assertThat(translated.mustNot(), equalTo(boolQueryBuilder.mustNot()));
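+ // The clause-level options below must also survive the translation unchanged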
assertThat(translated.minimumShouldMatch(), equalTo(boolQueryBuilder.minimumShouldMatch()));
+        assertThat(translated.filter(), equalTo(boolQueryBuilder.filter()));
+    }
+
+    public void testFieldNameTranslation() {
+        String field = randomAllowedField();
+        final WildcardQueryBuilder wildcardQueryBuilder = QueryBuilders.wildcardQuery(field, "*" + randomAlphaOfLength(3));
+        final UserBoolQueryBuilder userBoolQueryBuilder = UserBoolQueryBuilder.build(wildcardQueryBuilder);
+        assertCommonFilterQueries(userBoolQueryBuilder);
+        assertThat(userBoolQueryBuilder.must().get(0), equalTo(QueryBuilders.wildcardQuery(field, wildcardQueryBuilder.value())));
+    }
+
+    public void testAllowListOfFieldNames() {
+        final String fieldName = randomValueOtherThanMany(
+            v -> Arrays.asList(allowedIndexFieldNames).contains(v),
+            () -> randomFrom(randomAlphaOfLengthBetween(3, 20), "type", "password")
+        );
+
+        // MatchAllQueryBuilder doesn't do any translation, so skip
+        final QueryBuilder q1 = randomValueOtherThanMany(
+            q -> q.getClass() == MatchAllQueryBuilder.class,
+            () -> randomSimpleQuery(fieldName)
+        );
+        final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> UserBoolQueryBuilder.build(q1));
+
+        assertThat(exception.getMessage(), containsString("Field [" + fieldName + "] is not allowed"));
+    }
+
+    public void testTermsLookupIsNotAllowed() {
+        final TermsQueryBuilder q1 = QueryBuilders.termsLookupQuery("roles", new TermsLookup("lookup", "1", "id"));
+        final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> UserBoolQueryBuilder.build(q1));
+        assertThat(e1.getMessage(), containsString("Terms query with terms lookup is not supported for User query"));
+    }
+
+    public void testDisallowedQueryTypes() {
+        final AbstractQueryBuilder<? extends AbstractQueryBuilder<?>> q1 = randomFrom(
+            QueryBuilders.idsQuery(),
+            QueryBuilders.rangeQuery(randomAlphaOfLength(5)),
+            QueryBuilders.matchQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.constantScoreQuery(mock(QueryBuilder.class)),
+            QueryBuilders.boostingQuery(mock(QueryBuilder.class), mock(QueryBuilder.class)),
+            QueryBuilders.queryStringQuery("q=a:42"),
+            QueryBuilders.simpleQueryStringQuery(randomAlphaOfLength(5)),
+            QueryBuilders.combinedFieldsQuery(randomAlphaOfLength(5)),
+            QueryBuilders.disMaxQuery(),
+            QueryBuilders.distanceFeatureQuery(
+                randomAlphaOfLength(5),
+                mock(DistanceFeatureQueryBuilder.Origin.class),
+                randomAlphaOfLength(5)
+            ),
+            QueryBuilders.fieldMaskingSpanQuery(mock(SpanQueryBuilder.class), randomAlphaOfLength(5)),
+            QueryBuilders.functionScoreQuery(mock(QueryBuilder.class)),
+            QueryBuilders.fuzzyQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.wrapperQuery(randomAlphaOfLength(5)),
+            QueryBuilders.matchBoolPrefixQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.matchPhraseQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.matchPhrasePrefixQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.moreLikeThisQuery(randomArray(1, 3, String[]::new, () -> randomAlphaOfLength(5))),
+            QueryBuilders.regexpQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.spanTermQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.spanOrQuery(mock(SpanQueryBuilder.class)),
+            QueryBuilders.spanContainingQuery(mock(SpanQueryBuilder.class), mock(SpanQueryBuilder.class)),
+            QueryBuilders.spanFirstQuery(mock(SpanQueryBuilder.class), randomIntBetween(1, 3)),
QueryBuilders.spanMultiTermQueryBuilder(mock(MultiTermQueryBuilder.class)), + QueryBuilders.spanNotQuery(mock(SpanQueryBuilder.class), mock(SpanQueryBuilder.class)), + QueryBuilders.scriptQuery(new Script(randomAlphaOfLength(5))), + QueryBuilders.scriptScoreQuery(mock(QueryBuilder.class), new Script(randomAlphaOfLength(5))), + QueryBuilders.geoWithinQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)), + QueryBuilders.geoBoundingBoxQuery(randomAlphaOfLength(5)), + QueryBuilders.geoDisjointQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)), + QueryBuilders.geoDistanceQuery(randomAlphaOfLength(5)), + QueryBuilders.geoIntersectionQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)), + QueryBuilders.geoShapeQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)) + ); + + final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> UserBoolQueryBuilder.build(q1)); + assertThat(e1.getMessage(), containsString("Query type [" + q1.getName() + "] is not supported for User query")); + } + + public void testWillSetAllowedFields() { + final UserBoolQueryBuilder userBoolQueryBuilder = UserBoolQueryBuilder.build(randomSimpleQuery()); + + final SearchExecutionContext context = mock(SearchExecutionContext.class); + doAnswer(invocationOnMock -> { + final Object[] args = invocationOnMock.getArguments(); + @SuppressWarnings("unchecked") + final Predicate predicate = (Predicate) args[0]; + assertTrue(predicate.getClass().getName().startsWith(UserBoolQueryBuilder.class.getName())); + testAllowedIndexFieldName(predicate); + return null; + }).when(context).setAllowedFields(any()); + try { + if (randomBoolean()) { + userBoolQueryBuilder.doToQuery(context); + } else { + userBoolQueryBuilder.doRewrite(context); + } + } catch (Exception e) { + // just ignore any exception from superclass since we only need verify the allowedFields are set + } finally { + verify(context).setAllowedFields(any()); + } + } + + private void testAllowedIndexFieldName(Predicate predicate) { + final String allowedField = randomAllowedField(); + assertTrue(predicate.test(allowedField)); + + final String disallowedField = randomBoolean() ? 
(randomAlphaOfLengthBetween(1, 3) + allowedField) : (allowedField.substring(1)); + assertFalse(predicate.test(disallowedField)); + } + + private void assertCommonFilterQueries(UserBoolQueryBuilder qb) { + final List tqb = qb.filter() + .stream() + .filter(q -> q.getClass() == TermQueryBuilder.class) + .map(q -> (TermQueryBuilder) q) + .toList(); + assertTrue(tqb.stream().anyMatch(q -> q.equals(QueryBuilders.termQuery("type", "user")))); + } + + private String randomAllowedField() { + return randomFrom(allowedIndexFieldNames); + } + + private QueryBuilder randomSimpleQuery() { + return randomSimpleQuery(randomAllowedField()); + } + + private QueryBuilder randomSimpleQuery(String fieldName) { + return randomFrom( + QueryBuilders.termQuery(fieldName, randomAlphaOfLengthBetween(3, 8)), + QueryBuilders.termsQuery(fieldName, randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 8))), + QueryBuilders.prefixQuery(fieldName, randomAlphaOfLength(randomIntBetween(3, 10))), + QueryBuilders.wildcardQuery(fieldName, "*" + randomAlphaOfLength(randomIntBetween(3, 10))), + QueryBuilders.matchAllQuery(), + QueryBuilders.existsQuery(fieldName) + ); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java index c87ddd116b138..8c422342c3640 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockSocket; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -77,7 +78,7 @@ public final class SecurityNetty4HeaderSizeLimitTests extends ESTestCase { @Before public void startThreadPool() { - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); TaskManager taskManager = new TaskManager(settings, threadPool, Collections.emptySet()); NetworkService networkService = new NetworkService(Collections.emptyList()); PageCacheRecycler recycler = new MockPageCacheRecycler(Settings.EMPTY); diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java index 0de11109e33e7..d940f366ef942 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java @@ -88,7 +88,7 @@ public InternalGeoLine(StreamInput in) throws IOException { this.includeSorts = in.readBoolean(); this.sortOrder = SortOrder.readFromStream(in); this.size = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { nonOverlapping = in.readBoolean(); simplified = in.readBoolean(); } else { @@ -105,7 +105,7 @@ protected void 
doWriteTo(StreamOutput out) throws IOException { out.writeBoolean(includeSorts); sortOrder.writeTo(out); out.writeVInt(size); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeBoolean(nonOverlapping); out.writeBoolean(simplified); } diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java index f667ae4b80d03..7ad54901e2d06 100644 --- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java +++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction; import org.elasticsearch.action.support.ActionFilter; @@ -271,7 +270,7 @@ protected TaskId cancelTaskWithXOpaqueId(String id, String action) { TaskId taskId = findTaskWithXOpaqueId(id, action); assertNotNull(taskId); logger.trace("Cancelling task " + taskId); - CancelTasksResponse response = clusterAdmin().prepareCancelTasks().setTargetTaskId(taskId).get(); + ListTasksResponse response = clusterAdmin().prepareCancelTasks().setTargetTaskId(taskId).get(); assertThat(response.getTasks(), hasSize(1)); assertThat(response.getTasks().get(0).action(), equalTo(action)); logger.trace("Task is cancelled " + taskId); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitCursor.java index 36a42aaad7161..8fa41017762a7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitCursor.java @@ -156,14 +156,12 @@ static void handle( logSearchResponse(response, log); } - SearchHit[] hits = response.getHits().getHits(); - SearchHitRowSet rowSet = makeRowSet.get(); if (rowSet.hasRemaining() == false) { closePointInTime(client, response.pointInTimeId(), listener.delegateFailureAndWrap((l, r) -> l.onResponse(Page.last(rowSet)))); } else { - updateSearchAfter(hits, source); + updateSearchAfter(response.getHits().getHits(), source); SearchHitCursor nextCursor = new SearchHitCursor( source, diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java index ba6a9854e4254..b6e3e8b759352 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java @@ -28,9 +28,8 @@ * Extracts rows from an array of {@link SearchHit}. 
*/ class SearchHitRowSet extends ResultRowSet { - private final SearchHit[] hits; + private final SearchHits hits; private final Map> flatInnerHits = new HashMap<>(); - private final Set innerHits = new LinkedHashSet<>(); private final String innerHit; private final int size; @@ -42,13 +41,14 @@ class SearchHitRowSet extends ResultRowSet { SearchHitRowSet(List exts, BitSet mask, int sizeRequested, int limit, SearchResponse response) { super(exts, mask); - this.hits = response.getHits().getHits(); + this.hits = response.getHits().asUnpooled(); // Since the results might contain nested docs, the iteration is similar to that of Aggregation // namely it discovers the nested docs and then, for iteration, increments the deepest level first // and eventually carries that over to the top level String innerHit = null; + Set innerHits = new LinkedHashSet<>(); for (HitExtractor ex : exts) { if (ex.hitName() != null) { innerHits.add(ex.hitName()); @@ -58,7 +58,7 @@ class SearchHitRowSet extends ResultRowSet { } } - int sz = hits.length; + int sz = hits.getHits().length; int maxDepth = 0; if (innerHits.isEmpty() == false) { @@ -106,7 +106,7 @@ protected Object extractValue(HitExtractor e) { int extractorLevel = e.hitName() == null ? 0 : 1; SearchHit hit = null; - SearchHit[] sh = hits; + SearchHit[] sh = hits.getHits(); for (int lvl = 0; lvl <= extractorLevel; lvl++) { // TODO: add support for multi-nested doc if (hit != null) { @@ -172,7 +172,7 @@ protected boolean doNext() { // increment last row indexPerLevel[indexPerLevel.length - 1]++; // then check size - SearchHit[] sh = hits; + SearchHit[] sh = hits.getHits(); for (int lvl = 0; lvl < indexPerLevel.length; lvl++) { if (indexPerLevel[lvl] == sh.length) { // reset the current branch @@ -181,7 +181,7 @@ protected boolean doNext() { indexPerLevel[lvl - 1]++; // restart the loop lvl = 0; - sh = hits; + sh = hits.getHits(); } else { SearchHit h = sh[indexPerLevel[lvl]]; // TODO: improve this for multi-nested responses diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java index d06a239e61ce7..112be29d2dcd8 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java @@ -82,7 +82,7 @@ public void testGet() { double value = randomDouble(); double expected = Math.log(value); DocumentField field = new DocumentField(fieldName, singletonList(value)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); assertEquals(expected, extractor.process(hit)); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index 5c3fc378d90c1..b951f96e8b933 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -94,7 +94,7 @@ public void testGetDottedValueWithDocValues() { } DocumentField field = new DocumentField(fieldName, documentFieldValues); 
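      // Illustrative aside, not part of this patch: SearchHit is ref-counted in this release,
      // so tests that build hits by hand switch from `new SearchHit(...)` to `SearchHit.unpooled(...)`,
      // which returns a hit whose lifecycle is not tied to a pooled SearchResponse. A minimal
      // fixture in that style (names are examples only) would look like:
      //
      //   SearchHit hit = SearchHit.unpooled(1, "docId");
      //   hit.setDocumentField("field", new DocumentField("field", singletonList("value")));
      //   SearchHits hits = SearchHits.unpooled(new SearchHit[] { hit }, totalHits, 1.0f);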
- SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); Object result = documentFieldValues.isEmpty() ? null : documentFieldValues.get(0); assertEquals(result, extractor.extract(hit)); @@ -112,7 +112,7 @@ public void testGetDocValue() { documentFieldValues.add(randomValue()); } DocumentField field = new DocumentField(fieldName, documentFieldValues); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); Object result = documentFieldValues.isEmpty() ? null : documentFieldValues.get(0); assertEquals(result, extractor.extract(hit)); @@ -127,7 +127,7 @@ public void testGetDate() { ZonedDateTime zdt = DateUtils.asDateTimeWithMillis(millis, zoneId).plusNanos(nanosOnly); List documentFieldValues = Collections.singletonList(StringUtils.toString(zdt)); DocumentField field = new DocumentField("my_date_nanos_field", documentFieldValues); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField("my_date_nanos_field", field); FieldHitExtractor extractor = new FieldHitExtractor("my_date_nanos_field", DATETIME, zoneId, LENIENT); assertEquals(zdt, extractor.extract(hit)); @@ -144,7 +144,7 @@ public void testMultiValuedDocValue() { String fieldName = randomAlphaOfLength(5); FieldHitExtractor fe = getFieldHitExtractor(fieldName); DocumentField field = new DocumentField(fieldName, asList("a", "b")); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); Exception ex = expectThrows(InvalidArgumentException.class, () -> fe.extract(hit)); assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); @@ -154,7 +154,7 @@ public void testExtractSourcePath() { FieldHitExtractor fe = getFieldHitExtractor("a.b.c"); Object value = randomValue(); DocumentField field = new DocumentField("a.b.c", singletonList(value)); - SearchHit hit = new SearchHit(1, null, null); + SearchHit hit = SearchHit.unpooled(1, null, null); hit.setDocumentField("a.b.c", field); assertThat(fe.extract(hit), is(value)); } @@ -163,7 +163,7 @@ public void testMultiValuedSource() { FieldHitExtractor fe = getFieldHitExtractor("a"); Object value = randomValue(); DocumentField field = new DocumentField("a", asList(value, value)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField("a", field); Exception ex = expectThrows(InvalidArgumentException.class, () -> fe.extract(hit)); assertThat(ex.getMessage(), is("Arrays (returned by [a]) are not supported")); @@ -174,7 +174,7 @@ public void testMultiValuedSourceAllowed() { Object valueA = randomValue(); Object valueB = randomValue(); DocumentField field = new DocumentField("a", asList(valueA, valueB)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField("a", field); assertEquals(valueA, fe.extract(hit)); } @@ -187,7 +187,7 @@ public void testGeoShapeExtraction() { map.put("coordinates", asList(1d, 2d)); map.put("type", "Point"); DocumentField field = new DocumentField(fieldName, singletonList(map)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); assertEquals(new GeoShape(1, 2), fe.extract(hit)); @@ -204,14 +204,14 @@ public void testMultipleGeoShapeExtraction() { map2.put("coordinates", asList(3d, 4d)); 
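      // Illustrative aside, not part of this patch: the SQL extractors read geo_shape doc values
      // as GeoJSON-style maps, so { "type": "Point", "coordinates": [3.0, 4.0] } becomes
      // new GeoShape(3, 4); a strict (NONE) extractor rejects multi-valued fields, while the
      // LENIENT one built below is exercised against a single-valued hit.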
map2.put("type", "Point"); DocumentField field = new DocumentField(fieldName, asList(map1, map2)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); Exception ex = expectThrows(InvalidArgumentException.class, () -> fe.extract(hit)); assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, randomBoolean() ? GEO_SHAPE : SHAPE, UTC, LENIENT); - SearchHit searchHit = new SearchHit(1, "1"); + SearchHit searchHit = SearchHit.unpooled(1, "1"); searchHit.setDocumentField(fieldName, new DocumentField(fieldName, singletonList(map2))); assertEquals(new GeoShape(3, 4), lenientFe.extract(searchHit)); } @@ -223,7 +223,7 @@ public void testUnsignedLongExtraction() { String fieldName = randomAlphaOfLength(10); DocumentField field = new DocumentField(fieldName, singletonList(value)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); FieldHitExtractor fe = new FieldHitExtractor(fieldName, UNSIGNED_LONG, randomZone(), randomBoolean() ? NONE : LENIENT); @@ -237,7 +237,7 @@ public void testVersionExtraction() { String fieldName = randomAlphaOfLength(10); DocumentField field = new DocumentField(fieldName, singletonList(value)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); FieldHitExtractor fe = new FieldHitExtractor(fieldName, VERSION, randomZone(), randomBoolean() ? NONE : LENIENT); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractorTests.java index fdce6cbcf0c2f..5d007218aeeb1 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractorTests.java @@ -14,7 +14,7 @@ public void testGet() { int times = between(1, 1000); for (int i = 0; i < times; i++) { float score = randomFloat(); - SearchHit hit = new SearchHit(1); + SearchHit hit = SearchHit.unpooled(1); hit.score(score); assertEquals(score, ScoreExtractor.INSTANCE.extract(hit)); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java index b7f123f82cf98..9e83df706a77b 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java @@ -115,7 +115,7 @@ public void testExtractUnsignedLong() { private SearchHits searchHitsOf(Object value) { TotalHits totalHits = new TotalHits(10, TotalHits.Relation.EQUAL_TO); - SearchHit searchHit = new SearchHit(1, "docId"); + SearchHit searchHit = SearchHit.unpooled(1, "docId"); searchHit.addDocumentFields( Collections.singletonMap("topHitsAgg", new DocumentField("field", Collections.singletonList(value))), Collections.singletonMap( @@ -123,6 +123,6 @@ private SearchHits searchHitsOf(Object value) { new DocumentField("_ignored", 
Collections.singletonList(randomValueOtherThan(value, () -> randomAlphaOfLength(5)))) ) ); - return new SearchHits(new SearchHit[] { searchHit }, totalHits, 0.0f); + return SearchHits.unpooled(new SearchHit[] { searchHit }, totalHits, 0.0f); } } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/110_all_null.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/110_all_null.yml new file mode 100644 index 0000000000000..0f8dbbb97f57f --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/110_all_null.yml @@ -0,0 +1,263 @@ +--- +setup: + - skip: + version: " - 8.12.99" + reason: "feature added in 8.13" + + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 5 + mappings: + properties: + data: + type: long + data_d: + type: double + count: + type: long + count_d: + type: double + time: + type: long + color: + type: keyword + always_null: + type: keyword + non_null_out_of_match: + type: keyword + - do: + bulk: + index: "test" + refresh: true + body: + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275187, "color": "red" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275188, "color": "blue" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275189, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275190, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275191, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275192, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275193, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275194, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275195, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275196, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275197, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275198, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275199, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275200, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275201, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275202, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275203, "color": "red", "non_null_out_of_match": "a" } + - { 
"index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275204, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275205, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275206, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275207, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275208, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275209, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275210, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275211, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275212, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275213, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275214, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275215, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275216, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275217, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275218, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275219, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275220, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275221, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275222, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275223, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275224, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275225, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275226, "color": "red", "non_null_out_of_match": "a" } + +--- +row wise and keep null: + - do: + esql.query: + drop_null_columns: false + body: + query: 'FROM test | 
WHERE time <= 1674835275188 | SORT time ASC | LIMIT 2' + columnar: false + + - length: {columns: 8} + - match: {columns.0.name: "always_null"} + - match: {columns.0.type: "keyword"} + - match: {columns.1.name: "color"} + - match: {columns.1.type: "keyword"} + - match: {columns.2.name: "count"} + - match: {columns.2.type: "long"} + - match: {columns.3.name: "count_d"} + - match: {columns.3.type: "double"} + - match: {columns.4.name: "data"} + - match: {columns.4.type: "long"} + - match: {columns.5.name: "data_d"} + - match: {columns.5.type: "double"} + - match: {columns.6.name: "non_null_out_of_match"} + - match: {columns.6.type: "keyword"} + - match: {columns.7.name: "time"} + - match: {columns.7.type: "long"} + - length: {values: 2} + - length: {values.0: 8} + - is_false: values.0.0 + - match: {values.0.1: red} + +--- +row wise and drop null: + - do: + esql.query: + drop_null_columns: true + body: + query: 'FROM test | WHERE time <= 1674835275188 | SORT time ASC | LIMIT 2' + columnar: false + + - length: {all_columns: 8} + - match: {all_columns.0.name: "always_null"} + - match: {all_columns.0.type: "keyword"} + - match: {all_columns.1.name: "color"} + - match: {all_columns.1.type: "keyword"} + - match: {all_columns.2.name: "count"} + - match: {all_columns.2.type: "long"} + - match: {all_columns.3.name: "count_d"} + - match: {all_columns.3.type: "double"} + - match: {all_columns.4.name: "data"} + - match: {all_columns.4.type: "long"} + - match: {all_columns.5.name: "data_d"} + - match: {all_columns.5.type: "double"} + - match: {all_columns.6.name: "non_null_out_of_match"} + - match: {all_columns.6.type: "keyword"} + - match: {all_columns.7.name: "time"} + - match: {all_columns.7.type: "long"} + - length: {columns: 6} + - match: {columns.0.name: "color"} + - match: {columns.0.type: "keyword"} + - match: {columns.1.name: "count"} + - match: {columns.1.type: "long"} + - match: {columns.2.name: "count_d"} + - match: {columns.2.type: "double"} + - match: {columns.3.name: "data"} + - match: {columns.3.type: "long"} + - match: {columns.4.name: "data_d"} + - match: {columns.4.type: "double"} + - match: {columns.5.name: "time"} + - match: {columns.5.type: "long"} + - length: {values: 2} + - length: {values.0: 6} + - match: {values.0.0: red} + +--- +columnar and keep null: + - do: + esql.query: + drop_null_columns: false + body: + query: 'FROM test | WHERE time <= 1674835275188 | SORT time ASC | LIMIT 2' + columnar: true + + - length: {columns: 8} + - match: {columns.0.name: "always_null"} + - match: {columns.0.type: "keyword"} + - match: {columns.1.name: "color"} + - match: {columns.1.type: "keyword"} + - match: {columns.2.name: "count"} + - match: {columns.2.type: "long"} + - match: {columns.3.name: "count_d"} + - match: {columns.3.type: "double"} + - match: {columns.4.name: "data"} + - match: {columns.4.type: "long"} + - match: {columns.5.name: "data_d"} + - match: {columns.5.type: "double"} + - match: {columns.6.name: "non_null_out_of_match"} + - match: {columns.6.type: "keyword"} + - match: {columns.7.name: "time"} + - match: {columns.7.type: "long"} + - length: {values: 8} + - length: {values.0: 2} + - is_false: values.0.0 + - match: {values.1.0: red} + +--- +columnar and drop null: + - do: + esql.query: + drop_null_columns: true + body: + query: 'FROM test | WHERE time <= 1674835275188 | SORT time ASC | LIMIT 2' + columnar: true + + - length: {all_columns: 8} + - match: {all_columns.0.name: "always_null"} + - match: {all_columns.0.type: "keyword"} + - match: {all_columns.1.name: "color"} + - 
match: {all_columns.1.type: "keyword"} + - match: {all_columns.2.name: "count"} + - match: {all_columns.2.type: "long"} + - match: {all_columns.3.name: "count_d"} + - match: {all_columns.3.type: "double"} + - match: {all_columns.4.name: "data"} + - match: {all_columns.4.type: "long"} + - match: {all_columns.5.name: "data_d"} + - match: {all_columns.5.type: "double"} + - match: {all_columns.6.name: "non_null_out_of_match"} + - match: {all_columns.6.type: "keyword"} + - match: {all_columns.7.name: "time"} + - match: {all_columns.7.type: "long"} + - length: {columns: 6} + - match: {columns.0.name: "color"} + - match: {columns.0.type: "keyword"} + - match: {columns.1.name: "count"} + - match: {columns.1.type: "long"} + - match: {columns.2.name: "count_d"} + - match: {columns.2.type: "double"} + - match: {columns.3.name: "data"} + - match: {columns.3.type: "long"} + - match: {columns.4.name: "data_d"} + - match: {columns.4.type: "double"} + - match: {columns.5.name: "time"} + - match: {columns.5.type: "long"} + - length: {values: 6} + - length: {values.0: 2} + - match: {values.0.0: red} diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml index 06fc2c8a3fa99..8b28776e42fcd 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml @@ -142,7 +142,7 @@ unsupported: - match: { columns.9.name: geo_point_alias } - match: { columns.9.type: geo_point } - match: { columns.10.name: geo_shape } - - match: { columns.10.type: unsupported } + - match: { columns.10.type: geo_shape } - match: { columns.11.name: histogram } - match: { columns.11.type: unsupported } - match: { columns.12.name: integer_range } @@ -170,7 +170,7 @@ unsupported: - match: { columns.23.name: search_as_you_type._index_prefix } - match: { columns.23.type: unsupported } - match: { columns.24.name: shape } - - match: { columns.24.type: unsupported } + - match: { columns.24.type: cartesian_shape } - match: { columns.25.name: some_doc.bar } - match: { columns.25.type: long } - match: { columns.26.name: some_doc.foo } @@ -191,7 +191,7 @@ unsupported: - match: { values.0.7: null } - match: { values.0.8: "POINT (10.0 12.0)" } - match: { values.0.9: "POINT (10.0 12.0)" } - - match: { values.0.10: null } + - match: { values.0.10: "LINESTRING (-97.154 25.996, -97.159 25.998, -97.181 25.991, -97.187 25.985)" } - match: { values.0.11: null } - match: { values.0.12: null } - match: { values.0.13: null } @@ -205,7 +205,7 @@ unsupported: - match: { values.0.21: null } - match: { values.0.22: null } - match: { values.0.23: null } - - match: { values.0.24: null } + - match: { values.0.24: "LINESTRING (-377.03653 389.897676, -377.009051 389.889939)" } - match: { values.0.25: 12 } - match: { values.0.26: xy } - match: { values.0.27: "foo bar" } @@ -238,7 +238,7 @@ unsupported: - match: { columns.9.name: geo_point_alias } - match: { columns.9.type: geo_point } - match: { columns.10.name: geo_shape } - - match: { columns.10.type: unsupported } + - match: { columns.10.type: geo_shape } - match: { columns.11.name: histogram } - match: { columns.11.type: unsupported } - match: { columns.12.name: integer_range } @@ -266,7 +266,7 @@ unsupported: - match: { columns.23.name: search_as_you_type._index_prefix } - match: { columns.23.type: unsupported } - match: { columns.24.name: shape } 
- - match: { columns.24.type: unsupported } + - match: { columns.24.type: cartesian_shape } - match: { columns.25.name: some_doc.bar } - match: { columns.25.type: long } - match: { columns.26.name: some_doc.foo } @@ -282,8 +282,8 @@ unsupported: - do: esql.query: body: - query: 'from test | keep shape | limit 0' - - match: { columns.0.name: shape } + query: 'from test | keep histogram | limit 0' + - match: { columns.0.name: histogram } - match: { columns.0.type: unsupported } - length: { values: 0 } @@ -322,7 +322,7 @@ unsupported with sort: - match: { columns.9.name: geo_point_alias } - match: { columns.9.type: geo_point } - match: { columns.10.name: geo_shape } - - match: { columns.10.type: unsupported } + - match: { columns.10.type: geo_shape } - match: { columns.11.name: histogram } - match: { columns.11.type: unsupported } - match: { columns.12.name: integer_range } @@ -350,7 +350,7 @@ unsupported with sort: - match: { columns.23.name: search_as_you_type._index_prefix } - match: { columns.23.type: unsupported } - match: { columns.24.name: shape } - - match: { columns.24.type: unsupported } + - match: { columns.24.type: cartesian_shape } - match: { columns.25.name: some_doc.bar } - match: { columns.25.type: long } - match: { columns.26.name: some_doc.foo } @@ -371,7 +371,7 @@ unsupported with sort: - match: { values.0.7: null } - match: { values.0.8: "POINT (10.0 12.0)" } - match: { values.0.9: "POINT (10.0 12.0)" } - - match: { values.0.10: null } + - match: { values.0.10: "LINESTRING (-97.154 25.996, -97.159 25.998, -97.181 25.991, -97.187 25.985)" } - match: { values.0.11: null } - match: { values.0.12: null } - match: { values.0.13: null } @@ -385,7 +385,7 @@ unsupported with sort: - match: { values.0.21: null } - match: { values.0.22: null } - match: { values.0.23: null } - - match: { values.0.24: null } + - match: { values.0.24: "LINESTRING (-377.03653 389.897676, -377.009051 389.889939)" } - match: { values.0.25: 12 } - match: { values.0.26: xy } - match: { values.0.27: "foo bar" } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml index fdccf473b358a..69b676c92ed72 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml @@ -218,6 +218,9 @@ setup: --- "Test start deployment fails while model download in progress": + - skip: + features: fips_140 + reason: "@AwaitsFix https://github.com/elastic/elasticsearch/issues/104414" - do: ml.put_trained_model: model_id: .elser_model_2 diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml index 5adbf782f3236..8bc863e6fca9f 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml @@ -1,10 +1,8 @@ --- setup: - skip: - version: all - reason: AwaitsFix https://github.com/elastic/elasticsearch/issues/104038 - # version: " - 8.12.99" - # reason: "Universal Profiling test infrastructure is available in 8.12+" + version: " - 8.12.99" + reason: "Universal Profiling test infrastructure is available in 8.12+" - do: cluster.put_settings: diff --git 
a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java
index 843dee43706f8..1d44ed5a1f8ef 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java
@@ -18,7 +18,6 @@
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
 import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.TransportIndexAction;
@@ -27,6 +26,7 @@
 import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -848,7 +848,7 @@ public void refresh(ActionListener<Boolean> listener) {
             client.threadPool().getThreadContext(),
             TRANSFORM_ORIGIN,
             new RefreshRequest(TransformInternalIndexConstants.LATEST_INDEX_NAME),
-            ActionListener.<RefreshResponse>wrap(r -> listener.onResponse(true), listener::onFailure),
+            ActionListener.<BroadcastResponse>wrap(r -> listener.onResponse(true), listener::onFailure),
             client.admin().indices()::refresh
         );
     }
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndexTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndexTests.java
index d76b6b67368f9..69139bc3f7561 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndexTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndexTests.java
@@ -29,7 +29,7 @@ public void testEquals() {
     }
 
     public void testFromSearchHit() {
-        SearchHit searchHit = new SearchHit(1);
+        SearchHit searchHit = SearchHit.unpooled(1);
         long seqNo = randomLongBetween(-2, 10_000);
         long primaryTerm = randomLongBetween(-2, 10_000);
         String index = randomAlphaOfLength(10);
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java
index 8ee7e902285c9..fa8e867d77a49 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java
@@ -544,7 +544,11 @@ protected void
                 ActionListener.respondAndRelease(
                     listener,
                     (Response) new SearchResponse(
-                        new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f),
+                        SearchHits.unpooled(
+                            new SearchHit[] { SearchHit.unpooled(1) },
+                            new TotalHits(1L,
TotalHits.Relation.EQUAL_TO), + 1.0f + ), // Simulate completely null aggs null, new Suggest(Collections.emptyList()), diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java index 5dee74cccee7a..a18c926e21da6 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java @@ -513,7 +513,7 @@ public void testRetentionPolicyDeleteByQueryThrowsIrrecoverable() throws Excepti ); final SearchResponse searchResponse = new SearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + SearchHits.unpooled(new SearchHit[] { SearchHit.unpooled(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), // Simulate completely null aggs null, new Suggest(Collections.emptyList()), @@ -606,7 +606,7 @@ public void testRetentionPolicyDeleteByQueryThrowsTemporaryProblem() throws Exce ); final SearchResponse searchResponse = new SearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + SearchHits.unpooled(new SearchHit[] { SearchHit.unpooled(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), // Simulate completely null aggs null, new Suggest(Collections.emptyList()), @@ -702,7 +702,7 @@ public void testFailureCounterIsResetOnSuccess() throws Exception { ); final SearchResponse searchResponse = new SearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + SearchHits.unpooled(new SearchHit[] { SearchHit.unpooled(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), // Simulate completely null aggs null, new Suggest(Collections.emptyList()), diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java index 708cb3d93cbed..512fd7a2383a1 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; @@ -112,7 +113,7 @@ public void testTermsFieldCollector() throws IOException { Aggregations aggs = new Aggregations(Collections.singletonList(composite)); SearchResponse response = new SearchResponse( - null, + SearchHits.EMPTY_WITH_TOTAL_HITS, aggs, null, false, diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java 
b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java
index dab6d8518d28f..fd4e60e485200 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.action.search.ShardSearchFailure;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.RangeQueryBuilder;
+import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation;
@@ -171,7 +172,7 @@ private static QueryBuilder buildFilterQuery(ChangeCollector collector) {
 
     private static SearchResponse buildSearchResponse(SingleValue minTimestamp, SingleValue maxTimestamp) {
         return new SearchResponse(
-            null,
+            SearchHits.EMPTY_WITH_TOTAL_HITS,
             new Aggregations(Arrays.asList(minTimestamp, maxTimestamp)),
             null,
             false,
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java
index 67f923769ffe3..be0bb177267bc 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.ShardSearchFailure;
@@ -19,6 +20,7 @@
 import org.elasticsearch.common.ValidationException;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.Strings;
+import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.search.SearchHits;
@@ -39,6 +41,8 @@
 import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig;
 import org.elasticsearch.xpack.core.transform.transforms.SettingsConfigTests;
 import org.elasticsearch.xpack.core.transform.transforms.SourceConfig;
+import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats;
+import org.elasticsearch.xpack.core.transform.transforms.TransformProgress;
 import org.elasticsearch.xpack.core.transform.transforms.pivot.AggregationConfig;
 import org.elasticsearch.xpack.core.transform.transforms.pivot.AggregationConfigTests;
 import org.elasticsearch.xpack.core.transform.transforms.pivot.GroupConfig;
@@ -239,7 +243,30 @@ public void testProcessSearchResponse() {
             SettingsConfigTests.randomSettingsConfig(),
             TransformConfigVersion.CURRENT,
             Collections.emptySet()
-        );
+        ) {
+            @Override
+            public Tuple<Stream<IndexRequest>, Map<String, Object>> processSearchResponse(
+                SearchResponse searchResponse,
+                String destinationIndex,
+                String destinationPipeline,
+                Map<String, String> fieldTypeMap,
+                TransformIndexerStats stats,
+                TransformProgress progress
+            ) {
+                try {
+                    return super.processSearchResponse(
+                        searchResponse,
+                        destinationIndex,
+                        destinationPipeline,
+                        fieldTypeMap,
+                        stats,
+                        progress
+                    );
+                } finally {
+                    searchResponse.decRef();
+                }
+            }
+        };
 
         Aggregations aggs = null;
         assertThat(pivot.processSearchResponse(searchResponseFromAggs(aggs), null, null, null, null, null), is(nullValue()));
@@ -324,7 +351,22 @@ public void testPreviewForCompositeAggregation() throws Exception {
     }
 
     private static SearchResponse searchResponseFromAggs(Aggregations aggs) {
-        return new SearchResponse(null, aggs, null, false, null, null, 1, null, 10, 5, 0, 0, new ShardSearchFailure[0], null);
+        return new SearchResponse(
+            SearchHits.EMPTY_WITH_TOTAL_HITS,
+            aggs,
+            null,
+            false,
+            null,
+            null,
+            1,
+            null,
+            10,
+            5,
+            0,
+            0,
+            ShardSearchFailure.EMPTY_ARRAY,
+            null
+        );
     }
 
     private class MyMockClient extends NoOpClient {
diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java
index f02b3f865adf0..d97b0bd81a101 100644
--- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java
+++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java
@@ -100,12 +100,12 @@ public void testExecuteWithAggs() {
 
     public void testExecuteAccessHits() throws Exception {
         CompareCondition condition = new CompareCondition("ctx.payload.hits.hits.0._score", CompareCondition.Op.EQ, 1, Clock.systemUTC());
-        SearchHit hit = new SearchHit(0, "1");
+        SearchHit hit = SearchHit.unpooled(0, "1");
         hit.score(1f);
         hit.shard(new SearchShardTarget("a", new ShardId("a", "indexUUID", 0), null));
         SearchResponse response = new SearchResponse(
-            new SearchHits(new SearchHit[] { hit }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1f),
+            SearchHits.unpooled(new SearchHit[] { hit }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1f),
             null,
             null,
             false,
diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java
index b82622fbd4819..67835971cd15a 100644
--- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java
+++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java
@@ -180,7 +180,7 @@ public void testActionConditionWithFailures() throws Exception {
         final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id)));
         try {
             assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L)));
-            searchHitReference.set(response.getHits().getAt(0));
+            searchHitReference.set(response.getHits().getAt(0).asUnpooled());
         } finally {
             response.decRef();
         }
@@ -240,7 +240,7 @@ public void testActionCondition() throws Exception {
         final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id)));
         try {
             assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L)));
-            searchHitReference.set(response.getHits().getAt(0));
+            searchHitReference.set(response.getHits().getAt(0).asUnpooled());
         } finally {
             response.decRef();
         }
diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java
index d1153b6eca3e6..265b252082c68 100644
--- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java
+++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java
@@ -7,7 +7,7 @@
 package org.elasticsearch.xpack.watcher.test.integration;
 
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
 import org.elasticsearch.rest.RestStatus;
@@ -65,7 +65,7 @@ public void testThatLoadingWithNonExistingIndexWorks() throws Exception {
         ensureGreen(HistoryStoreField.DATA_STREAM);
 
         assertBusy(() -> {
-            RefreshResponse refreshResponse = indicesAdmin().prepareRefresh(".watcher-history*").get();
+            BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh(".watcher-history*").get();
             assertThat(refreshResponse.getStatus(), equalTo(RestStatus.OK));
             assertResponse(
                 prepareSearch(".watcher-history*").setSize(0),
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java
index ea9295600fe41..a067b99c6bff0 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java
@@ -10,11 +10,11 @@
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.search.ClearScrollRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchScrollRequest;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
@@ -404,7 +404,7 @@ private Collection<Watch> loadWatches(ClusterState clusterState) {
 
     // Non private for unit testing purposes
     void refreshWatches(IndexMetadata indexMetadata) {
-        RefreshResponse refreshResponse = client.admin()
+        BroadcastResponse refreshResponse = client.admin()
             .indices()
             .refresh(new RefreshRequest(INDEX))
             .actionGet(TimeValue.timeValueSeconds(5));
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java
index c2ed68d8fa1bd..19bac967c576a 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.search.ClearScrollRequest;
 import org.elasticsearch.action.search.ClearScrollResponse;
 import org.elasticsearch.action.search.SearchRequest;
@@ -21,6 +20,7 @@
 import org.elasticsearch.action.search.TransportClearScrollAction;
 import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.action.search.TransportSearchScrollAction;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -162,12 +162,12 @@ void stopExecutor() {}
         ClusterState clusterState = csBuilder.build();
 
         // response setup, successful refresh response
-        RefreshResponse refreshResponse = mock(RefreshResponse.class);
+        BroadcastResponse refreshResponse = mock(BroadcastResponse.class);
         when(refreshResponse.getSuccessfulShards()).thenReturn(
             clusterState.getMetadata().getIndices().get(Watch.INDEX).getNumberOfShards()
         );
         doAnswer(invocation -> {
-            ActionListener<RefreshResponse> listener = (ActionListener<RefreshResponse>) invocation.getArguments()[2];
+            ActionListener<BroadcastResponse> listener = (ActionListener<BroadcastResponse>) invocation.getArguments()[2];
             listener.onResponse(refreshResponse);
             return null;
         }).when(client).execute(eq(RefreshAction.INSTANCE), any(RefreshRequest.class), anyActionListener());
@@ -196,7 +196,7 @@ void stopExecutor() {}
         SearchHit[] hits = new SearchHit[count];
         for (int i = 0; i < count; i++) {
             String id = String.valueOf(i);
-            SearchHit hit = new SearchHit(1, id);
+            SearchHit hit = SearchHit.unpooled(1, id);
             hit.version(1L);
             hit.shard(new SearchShardTarget("nodeId", new ShardId(watchIndex, 0), "whatever"));
             hits[i] = hit;
@@ -212,7 +212,7 @@ void stopExecutor() {}
             when(watch.status()).thenReturn(watchStatus);
             when(parser.parse(eq(id), eq(true), any(), eq(XContentType.JSON), anyLong(), anyLong())).thenReturn(watch);
         }
-        SearchHits searchHits = new SearchHits(hits, new TotalHits(count, TotalHits.Relation.EQUAL_TO), 1.0f);
+        SearchHits searchHits = SearchHits.unpooled(hits, new TotalHits(count, TotalHits.Relation.EQUAL_TO), 1.0f);
         doAnswer(invocation -> {
             ActionListener listener = (ActionListener) invocation.getArguments()[2];
             ActionListener.respondAndRelease(
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java
index 60fa2581b4218..b75ac51c3510f 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.bulk.BulkAction;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkProcessor2;
@@ -24,6 +23,7 @@
 import org.elasticsearch.action.search.TransportClearScrollAction;
 import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.action.search.TransportSearchScrollAction;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -202,7 +202,7 @@ public void testFindTriggeredWatchesGoodCase() {
 
         doAnswer(invocation -> {
             @SuppressWarnings("unchecked")
-            ActionListener<RefreshResponse> listener = (ActionListener<RefreshResponse>) invocation.getArguments()[2];
+            ActionListener<BroadcastResponse> listener = (ActionListener<BroadcastResponse>) invocation.getArguments()[2];
             listener.onResponse(mockRefreshResponse(1, 1));
             return null;
         }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any());
@@ -210,14 +210,14 @@ public void testFindTriggeredWatchesGoodCase() {
         SearchResponse searchResponse1 = mock(SearchResponse.class);
         when(searchResponse1.getSuccessfulShards()).thenReturn(1);
         when(searchResponse1.getTotalShards()).thenReturn(1);
-        final BytesArray source = new BytesArray("{}");
+        BytesArray source = new BytesArray("{}");
         {
-            final SearchHit hit = new SearchHit(0, "first_foo");
+            SearchHit hit = SearchHit.unpooled(0, "first_foo");
             hit.version(1L);
             hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null));
             hit.sourceRef(source);
             when(searchResponse1.getHits()).thenReturn(
-                new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f)
+                SearchHits.unpooled(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f)
             );
         }
         when(searchResponse1.getScrollId()).thenReturn("_scrollId");
@@ -228,20 +228,20 @@ public void testFindTriggeredWatchesGoodCase() {
             return null;
         }).when(client).execute(eq(TransportSearchAction.TYPE), any(), any());
 
-        // First return a scroll response with a single hit and then with no hits
         doAnswer(invocation -> {
             SearchScrollRequest request = (SearchScrollRequest) invocation.getArguments()[1];
             @SuppressWarnings("unchecked")
             ActionListener listener = (ActionListener) invocation.getArguments()[2];
             if (request.scrollId().equals("_scrollId")) {
-                final var hit2 = new SearchHit(0, "second_foo");
-                hit2.version(1L);
-                hit2.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null));
-                hit2.sourceRef(source);
+                // First return a scroll response with a single hit and then with no hits
+                var hit = SearchHit.unpooled(0, "second_foo");
+                hit.version(1L);
+                hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null));
+                hit.sourceRef(source);
                 ActionListener.respondAndRelease(
                     listener,
                     new SearchResponse(
-                        new SearchHits(new SearchHit[] { hit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f),
+                        SearchHits.unpooled(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f),
                         null,
                         null,
                         false,
@@ -409,7 +409,7 @@ public void testIndexNotFoundButInMetadata() {
 
         doAnswer(invocation -> {
             @SuppressWarnings("unchecked")
-            ActionListener<RefreshResponse> listener = (ActionListener<RefreshResponse>) invocation.getArguments()[2];
+            ActionListener<BroadcastResponse> listener = (ActionListener<BroadcastResponse>) invocation.getArguments()[2];
             listener.onFailure(new IndexNotFoundException(TriggeredWatchStoreField.INDEX_NAME));
             return null;
         }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any());
@@ -507,8 +507,8 @@ public void testDeleteTriggeredWatches() throws Exception {
         assertThat(response.getItems().length, is(1));
     }
 
-    private RefreshResponse mockRefreshResponse(int total, int successful) {
-        RefreshResponse refreshResponse = mock(RefreshResponse.class);
+    private BroadcastResponse mockRefreshResponse(int total, int successful) {
+        BroadcastResponse refreshResponse = mock(BroadcastResponse.class);
         when(refreshResponse.getTotalShards()).thenReturn(total);
         when(refreshResponse.getSuccessfulShards()).thenReturn(successful);
         return refreshResponse;
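
Note (not part of the patch): the Watcher test hunks above all follow the same two conventions from the pooled-SearchHit work — build test fixtures with the unpooled factories so they need no ref-counting, and release any real (pooled) SearchResponse in a finally block, copying hits out with asUnpooled() first. A minimal sketch of both patterns, assuming only the APIs visible in these hunks (SearchHit.unpooled, SearchHits.unpooled, asUnpooled, decRef); the class name and methods here are illustrative, not code from the change:

    import org.apache.lucene.search.TotalHits;
    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.search.SearchHit;
    import org.elasticsearch.search.SearchHits;

    class PooledResponseSketch {
        // Unpooled hits live outside the ref-counted lifecycle, so a test
        // can retain them after the owning response has been released.
        static SearchHits oneUnpooledHit(String id) {
            SearchHit hit = SearchHit.unpooled(0, id);
            hit.score(1f);
            return SearchHits.unpooled(new SearchHit[] { hit }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1f);
        }

        // A pooled response must be released exactly once; anything that
        // outlives it is copied out as an unpooled hit before decRef().
        static SearchHit firstHitCopy(SearchResponse response) { // caller obtained `response` from a real search
            try {
                return response.getHits().getAt(0).asUnpooled();
            } finally {
                response.decRef();
            }
        }
    }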