
Commit

Merge branch 'main' into test/snapshot_v7_index_version
javanna authored Jan 6, 2025
2 parents fde79ee + 9cbbd2d commit 51f5928
Showing 35 changed files with 258 additions and 587 deletions.
20 changes: 20 additions & 0 deletions docs/changelog/117519.yaml
@@ -0,0 +1,20 @@
pr: 117519
summary: Remove `data_frame_transforms` roles
area: Transform
type: breaking
issues: []
breaking:
title: Remove `data_frame_transforms` roles
area: Transform
details: >-
`data_frame_transforms_admin` and `data_frame_transforms_user` were deprecated in
Elasticsearch 7 and are being removed in Elasticsearch 9.
`data_frame_transforms_admin` is now `transform_admin`.
`data_frame_transforms_user` is now `transform_user`.
Users must call the `_update` API to replace the permissions on the Transform before the
Transform can be started.
impact: >-
Transforms created with either the `data_frame_transforms_admin` or the
`data_frame_transforms_user` role will fail to start. The Transform will remain
in a `stopped` state, and its health will be red while displaying permission failures.
notable: false
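
For transforms that were created while one of the removed roles was still assigned, one way to replace the stored permissions is to re-save the transform as a user holding the replacement role and then start it again. A minimal hedged sketch using the Elasticsearch low-level REST client; the transform id `my-transform` and the surrounding class are illustrative, not part of this change:

import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public final class TransformReauthorizationSketch {

    // Assumes `client` is authenticated as a user that holds the transform_admin role.
    static void reauthorizeAndStart(RestClient client) throws IOException {
        // An empty update re-stores the calling user's credentials on the transform,
        // replacing the permissions captured under the removed role.
        Request update = new Request("POST", "/_transform/my-transform/_update");
        update.setJsonEntity("{}");
        client.performRequest(update);

        // With valid permissions stored, the transform can be started again.
        client.performRequest(new Request("POST", "/_transform/my-transform/_start"));
    }
}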
12 changes: 0 additions & 12 deletions docs/reference/security/authorization/built-in-roles.asciidoc
@@ -33,18 +33,6 @@ suitable for writing beats output to {es}.

--

[[built-in-roles-data-frame-transforms-admin]] `data_frame_transforms_admin` ::
Grants `manage_data_frame_transforms` cluster privileges, which enable you to
manage {transforms}. This role also includes all
{kibana-ref}/kibana-privileges.html[Kibana privileges] for the {ml-features}.
deprecated:[7.5.0,"Replaced by <<built-in-roles-transform-admin,`transform_admin`>>"].

[[built-in-roles-data-frame-transforms-user]] `data_frame_transforms_user` ::
Grants `monitor_data_frame_transforms` cluster privileges, which enable you to
use {transforms}. This role also includes all
{kibana-ref}/kibana-privileges.html[Kibana privileges] for the {ml-features}.
deprecated:[7.5.0,"Replaced by <<built-in-roles-transform-user,`transform_user`>>"].

[[built-in-roles-editor]] `editor` ::

Grants full access to all features in {kib} (including Solutions) and read-only access to data indices.
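Where existing users still reference the removed role names, they need to be moved onto the replacement roles named in the deprecation notices above. A hedged sketch using the security user API through the same low-level REST client; the user name and password value are placeholders, not taken from this change:

import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public final class TransformRoleMigrationSketch {

    // Re-saves a native-realm user with transform_admin in place of the removed
    // data_frame_transforms_admin role (user name and password are illustrative).
    static void assignTransformAdmin(RestClient client) throws IOException {
        Request putUser = new Request("PUT", "/_security/user/transform_operator");
        putUser.setJsonEntity("""
            {
              "password" : "<choose-a-password>",
              "roles" : [ "transform_admin" ]
            }
            """);
        client.performRequest(putUser);
    }
}
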
@@ -9,14 +9,20 @@

package org.elasticsearch.lucene;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.InputStreamEntity;
import org.elasticsearch.client.Request;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Strings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.test.XContentTestUtils;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.cluster.util.Version;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.xcontent.XContentType;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
@@ -153,9 +159,9 @@ protected static void indexDocs(String indexName, int numDocs) throws Exception
var request = new Request("POST", "/_bulk");
var docs = new StringBuilder();
IntStream.range(0, numDocs).forEach(n -> docs.append(Strings.format("""
{"index":{"_id":"%s","_index":"%s"}}
{"test":"test"}
""", n, indexName)));
{"index":{"_index":"%s"}}
{"field_0":"%s","field_1":%d,"field_2":"%s"}
""", indexName, Integer.toString(n), n, randomFrom(Locale.getAvailableLocales()).getDisplayName())));
request.setJsonEntity(docs.toString());
var response = assertOK(client().performRequest(request));
assertThat(entityAsMap(response).get("errors"), allOf(notNullValue(), is(false)));
@@ -192,4 +198,38 @@ protected static void restoreIndex(String repository, String snapshot, String in
assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed")));
assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0));
}

protected static void updateRandomIndexSettings(String indexName) throws IOException {
final var settings = Settings.builder();
int updates = randomIntBetween(1, 3);
for (int i = 0; i < updates; i++) {
switch (i) {
case 0 -> settings.putList(IndexSettings.DEFAULT_FIELD_SETTING.getKey(), "field_" + randomInt(2));
case 1 -> settings.put(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), randomIntBetween(1, 100));
case 2 -> settings.put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), randomLongBetween(0L, 1000L));
case 3 -> settings.put(IndexSettings.MAX_SLICES_PER_SCROLL.getKey(), randomIntBetween(1, 1024));
default -> throw new IllegalStateException();
}
}
updateIndexSettings(indexName, settings);
}

protected static void updateRandomMappings(String indexName) throws IOException {
final var runtime = new HashMap<>();
runtime.put("field_" + randomInt(2), Map.of("type", "keyword"));
final var properties = new HashMap<>();
properties.put(randomIdentifier(), Map.of("type", "long"));
var body = XContentTestUtils.convertToXContent(Map.of("runtime", runtime, "properties", properties), XContentType.JSON);
var request = new Request("PUT", indexName + "/_mappings");
request.setEntity(
new InputStreamEntity(
body.streamInput(),
body.length(),
ContentType.create(XContentType.JSON.mediaTypeWithoutParameters())
)
);
assertOK(client().performRequest(request));
}

}
@@ -82,6 +82,9 @@ public void testSearchableSnapshot() throws Exception {
assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2));
assertDocCount(client(), mountedIndex, numDocs);

updateRandomIndexSettings(mountedIndex);
updateRandomMappings(mountedIndex);

logger.debug("--> adding replica to test peer-recovery");
updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1));
ensureGreen(mountedIndex);
@@ -130,6 +133,9 @@ public void testSearchableSnapshotUpgrade() throws Exception {

ensureGreen(mountedIndex);

updateRandomIndexSettings(mountedIndex);
updateRandomMappings(mountedIndex);

assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2));
assertDocCount(client(), mountedIndex, numDocs);
return;
@@ -141,6 +147,9 @@ public void testSearchableSnapshotUpgrade() throws Exception {
assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2));
assertDocCount(client(), mountedIndex, numDocs);

updateRandomIndexSettings(mountedIndex);
updateRandomMappings(mountedIndex);

logger.debug("--> adding replica to test peer-recovery");
updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1));
ensureGreen(mountedIndex);
@@ -76,6 +76,9 @@ public void testMountSearchableSnapshot() throws Exception {

ensureGreen(mountedIndex);

updateRandomIndexSettings(mountedIndex);
updateRandomMappings(mountedIndex);

assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2));
assertDocCount(client(), mountedIndex, numDocs);

@@ -134,6 +137,9 @@ public void testSearchableSnapshotUpgrade() throws Exception {

ensureGreen(mountedIndex);

updateRandomIndexSettings(mountedIndex);
updateRandomMappings(mountedIndex);

assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2));
assertDocCount(client(), mountedIndex, numDocs);
}
@@ -14,7 +14,6 @@
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;

import org.apache.lucene.tests.util.TimeUnits;
import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
@@ -43,15 +42,9 @@ public ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate
super(testCandidate);
}

@UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // remove restCompat check
@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
String restCompatProperty = System.getProperty("tests.restCompat");
if ("true".equals(restCompatProperty)) {
return createParametersWithLegacyNodeSelectorSupport();
} else {
return createParameters();
}
return createParameters();
}

@Override
@@ -16,8 +16,8 @@
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.util.CollectionUtils;
@@ -365,11 +365,10 @@ private void iterateAssertCount(final int numberOfShards, final int iterations,
);
}

ClusterService clusterService = clusterService();
final ClusterState state = clusterService.state();
final ClusterState state = clusterService().state();
for (int shard = 0; shard < numberOfShards; shard++) {
for (String id : ids) {
ShardId docShard = clusterService.operationRouting().shardId(state, "test", id, null);
ShardId docShard = OperationRouting.shardId(state, "test", id, null);
if (docShard.id() == shard) {
final IndexShardRoutingTable indexShardRoutingTable = state.routingTable().shardRoutingTable("test", shard);
for (int copy = 0; copy < indexShardRoutingTable.size(); copy++) {
@@ -95,7 +95,7 @@ final class RequestDispatcher {
for (String index : indices) {
final GroupShardsIterator<ShardIterator> shardIts;
try {
shardIts = clusterService.operationRouting().searchShards(clusterState, new String[] { index }, null, null, null, null);
shardIts = clusterService.operationRouting().searchShards(clusterState, new String[] { index }, null, null);
} catch (Exception e) {
onIndexFailure.accept(index, e);
continue;
@@ -30,7 +30,6 @@
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.PlainShardIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.core.TimeValue;
@@ -109,7 +108,7 @@ protected ShardIterator shards(ClusterState state, InternalRequest request) {
if (iterator == null) {
return null;
}
return new PlainShardIterator(iterator.shardId(), iterator.getShardRoutings().stream().filter(ShardRouting::isSearchable).toList());
return PlainShardIterator.allSearchableShards(iterator);
}

@Override
@@ -19,6 +19,7 @@
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.common.util.concurrent.EsExecutors;
@@ -81,7 +82,7 @@ protected void doExecute(Task task, final MultiGetRequest request, final ActionL
lastResolvedIndex = Tuple.tuple(item.index(), concreteSingleIndex);
}
item.routing(clusterState.metadata().resolveIndexRouting(item.routing(), item.index()));
shardId = clusterService.operationRouting().shardId(clusterState, concreteSingleIndex, item.id(), item.routing());
shardId = OperationRouting.shardId(clusterState, concreteSingleIndex, item.id(), item.routing());
} catch (RoutingMissingException e) {
responses.set(i, newItemFailure(e.getIndex().getName(), e.getId(), e));
continue;
@@ -30,7 +30,6 @@
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.PlainShardIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.core.TimeValue;
@@ -113,7 +112,7 @@ protected ShardIterator shards(ClusterState state, InternalRequest request) {
if (iterator == null) {
return null;
}
return new PlainShardIterator(iterator.shardId(), iterator.getShardRoutings().stream().filter(ShardRouting::isSearchable).toList());
return PlainShardIterator.allSearchableShards(iterator);
}

@Override
@@ -17,6 +17,7 @@
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.common.util.concurrent.EsExecutors;
@@ -72,8 +73,12 @@ protected void doExecute(Task task, final MultiTermVectorsRequest request, final
clusterState.metadata().resolveIndexRouting(termVectorsRequest.routing(), termVectorsRequest.index())
);
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, termVectorsRequest).getName();
shardId = clusterService.operationRouting()
.shardId(clusterState, concreteSingleIndex, termVectorsRequest.id(), termVectorsRequest.routing());
shardId = OperationRouting.shardId(
clusterState,
concreteSingleIndex,
termVectorsRequest.id(),
termVectorsRequest.routing()
);
} catch (RoutingMissingException e) {
responses.set(
i,
@@ -62,16 +62,27 @@ public TransportTermVectorsAction(

@Override
protected ShardIterator shards(ClusterState state, InternalRequest request) {
final var operationRouting = clusterService.operationRouting();
if (request.request().doc() != null && request.request().routing() == null) {
// artificial document without routing specified, ignore its "id" and use either random shard or according to preference
GroupShardsIterator<ShardIterator> groupShardsIter = clusterService.operationRouting()
.searchShards(state, new String[] { request.concreteIndex() }, null, request.request().preference());
GroupShardsIterator<ShardIterator> groupShardsIter = operationRouting.searchShards(
state,
new String[] { request.concreteIndex() },
null,
request.request().preference()
);
return groupShardsIter.iterator().next();
}

ShardIterator shards = clusterService.operationRouting()
.getShards(state, request.concreteIndex(), request.request().id(), request.request().routing(), request.request().preference());
return clusterService.operationRouting().useOnlyPromotableShardsForStateless(shards);
return operationRouting.useOnlyPromotableShardsForStateless(
operationRouting.getShards(
state,
request.concreteIndex(),
request.request().id(),
request.request().routing(),
request.request().preference()
)
);
}

@Override
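The refactored shards() method above keeps the existing behaviour for "artificial" documents: a term vectors request that carries a doc body and no routing is routed by preference rather than by document id. A hedged sketch of such a request through the low-level REST client; the index name and field are illustrative:

import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public final class ArtificialTermVectorsSketch {

    // Sends a _termvectors request for an ad-hoc document; because no id or routing
    // is supplied, shard selection falls back to preference/random as noted above.
    static void termVectorsForArtificialDoc(RestClient client) throws IOException {
        Request request = new Request("POST", "/my-index/_termvectors");
        request.setJsonEntity("""
            { "doc": { "field_0": "some text to analyze" } }
            """);
        client.performRequest(request);
    }
}
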
@@ -41,7 +41,6 @@
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParserUtils;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.gateway.MetadataStateFormat;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexMode;
@@ -944,22 +943,12 @@ public IndexMetadata withIncrementedPrimaryTerm(int shardId) {
/**
* @param timestampRange new @timestamp range
* @param eventIngestedRange new 'event.ingested' range
* @param minClusterTransportVersion minimum transport version used between nodes of this cluster
* @return copy of this instance with updated timestamp range
*/
public IndexMetadata withTimestampRanges(
IndexLongFieldRange timestampRange,
IndexLongFieldRange eventIngestedRange,
TransportVersion minClusterTransportVersion
) {
public IndexMetadata withTimestampRanges(IndexLongFieldRange timestampRange, IndexLongFieldRange eventIngestedRange) {
if (timestampRange.equals(this.timestampRange) && eventIngestedRange.equals(this.eventIngestedRange)) {
return this;
}
@UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) // remove this check when 8.15 is no longer communicable
IndexLongFieldRange allowedEventIngestedRange = eventIngestedRange;
if (minClusterTransportVersion.before(TransportVersions.V_8_15_0)) {
allowedEventIngestedRange = IndexLongFieldRange.UNKNOWN;
}
return new IndexMetadata(
this.index,
this.version,
@@ -990,7 +979,7 @@ public IndexMetadata withTimestampRanges(
this.isSystem,
this.isHidden,
timestampRange,
allowedEventIngestedRange,
eventIngestedRange,
this.priority,
this.creationDate,
this.ignoreDiskWatermarks,
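After this change, callers of withTimestampRanges pass only the two ranges; the transport-version guard for pre-8.15 nodes is gone. A hypothetical caller-side sketch of the simplified signature, assuming an IndexMetadata instance obtained from cluster state elsewhere; the helper and its choice of ranges are illustrative only:

import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.index.shard.IndexLongFieldRange;

public final class TimestampRangeUpdateSketch {

    // Returns a copy of the given index metadata with both tracked ranges reset;
    // the concrete ranges a real caller passes depend on its own bookkeeping.
    static IndexMetadata resetRanges(IndexMetadata indexMetadata) {
        return indexMetadata.withTimestampRanges(IndexLongFieldRange.UNKNOWN, IndexLongFieldRange.UNKNOWN);
    }
}
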
@@ -12,7 +12,6 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.Diffable;
@@ -521,7 +520,7 @@ public Metadata withLastCommittedValues(
/**
* Creates a copy of this instance updated with the given {@link IndexMetadata} that must only contain changes to primary terms
* and in-sync allocation ids relative to the existing entries. This method is only used by
* {@link org.elasticsearch.cluster.routing.allocation.IndexMetadataUpdater#applyChanges(Metadata, RoutingTable, TransportVersion)}.
* {@link org.elasticsearch.cluster.routing.allocation.IndexMetadataUpdater#applyChanges(Metadata, RoutingTable)}.
* @param updates map of index name to {@link IndexMetadata}.
* @return updated metadata instance
*/