Use of org.elasticsearch.indices.IndicesService in project elasticsearch by elastic.
From class ESIntegTestCase, method assertConcreteMappingsOnAll:
/**
 * Waits until the given (pattern) field names have concrete mappings on all nodes. Note that this
 * only inspects the currently started shards of the index and checks for concrete mappings there.
 */
public void assertConcreteMappingsOnAll(final String index, final String type, final String... fieldNames) throws Exception {
    Set<String> nodes = internalCluster().nodesInclude(index);
    assertThat(nodes, Matchers.not(Matchers.emptyIterable()));
    for (String node : nodes) {
        IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
        IndexService indexService = indicesService.indexService(resolveIndex(index));
        assertThat("index service doesn't exists on " + node, indexService, notNullValue());
        DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
        assertThat("document mapper doesn't exists on " + node, documentMapper, notNullValue());
        for (String fieldName : fieldNames) {
            Collection<String> matches = documentMapper.mappers().simpleMatchToFullName(fieldName);
            assertThat("field " + fieldName + " doesn't exists on " + node, matches, Matchers.not(emptyIterable()));
        }
    }
    assertMappingOnMaster(index, type, fieldNames);
}
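A minimal sketch of how a test might call this helper from an ESIntegTestCase subclass; the index name, type, and field name below are illustrative, not taken from the original source:

// Hypothetical usage (index/type/field names are made up for illustration):
public void testDynamicFieldReachesAllNodes() throws Exception {
    createIndex("test");
    // indexing this document introduces the field "field1" via dynamic mapping
    client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
    // wait until the concrete mapping for "field1" is visible on every node holding a shard of "test"
    assertConcreteMappingsOnAll("test", "type1", "field1");
}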
Use of org.elasticsearch.indices.IndicesService in project elasticsearch by elastic.
From class DynamicMappingDisabledTests, method setUp:
@Override
public void setUp() throws Exception {
    super.setUp();
    Settings settings = Settings.builder()
        .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false)
        .build();
    clusterService = createClusterService(threadPool);
    Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
        new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()),
        new NetworkService(settings, Collections.emptyList()));
    transportService = new TransportService(clusterService.getSettings(), transport, threadPool,
        TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null);
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    ShardStateAction shardStateAction = new ShardStateAction(settings, clusterService, transportService, null, null, threadPool);
    ActionFilters actionFilters = new ActionFilters(Collections.emptySet());
    IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
    AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings,
        new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), indexNameExpressionResolver);
    UpdateHelper updateHelper = new UpdateHelper(settings, null);
    TransportShardBulkAction shardBulkAction = new TransportShardBulkAction(settings, transportService, clusterService,
        indicesService, threadPool, shardStateAction, null, updateHelper, actionFilters, indexNameExpressionResolver);
    transportBulkAction = new TransportBulkAction(settings, threadPool, transportService, clusterService, null,
        shardBulkAction, null, actionFilters, indexNameExpressionResolver, autoCreateIndex, System::currentTimeMillis);
}
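With this fixture in place, a test can push an index request through transportBulkAction and expect it to fail, since dynamic mapping (and therefore implicit index creation) is disabled. The sketch below shows the assumed shape of such a test; the exact request body and assertions in the real class may differ:

// Illustrative test body (assumed, not copied from the original class):
public void testDynamicDisabled() {
    IndexRequest request = new IndexRequest("index", "type", "1");
    request.source(Collections.singletonMap("foo", "bar"));
    BulkRequest bulkRequest = new BulkRequest();
    bulkRequest.add(request);
    final AtomicBoolean onFailureCalled = new AtomicBoolean();
    transportBulkAction.execute(bulkRequest, new ActionListener<BulkResponse>() {
        @Override
        public void onResponse(BulkResponse bulkResponse) {
            fail("request should have failed because dynamic mapping is disabled");
        }

        @Override
        public void onFailure(Exception e) {
            onFailureCalled.set(true);
        }
    });
    assertTrue("expected the bulk request to fail", onFailureCalled.get());
}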
Use of org.elasticsearch.indices.IndicesService in project elasticsearch by elastic.
From class SearchServiceTests, method testTimeout:
public void testTimeout() throws IOException {
    createIndex("index");
    final SearchService service = getInstanceFromNode(SearchService.class);
    final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
    final IndexShard indexShard = indexService.getShard(0);
    final SearchContext contextWithDefaultTimeout = service.createContext(
        new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, new SearchSourceBuilder(),
            new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), null);
    // the search context should inherit the default timeout
    assertThat(contextWithDefaultTimeout.timeout(), equalTo(TimeValue.timeValueSeconds(5)));
    final long seconds = randomIntBetween(6, 10);
    final SearchContext context = service.createContext(
        new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
            new SearchSourceBuilder().timeout(TimeValue.timeValueSeconds(seconds)),
            new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), null);
    // the search context should inherit the query timeout
    assertThat(context.timeout(), equalTo(TimeValue.timeValueSeconds(seconds)));
}
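The 5-second default asserted above must come from node configuration rather than the request, since the first request carries no timeout. One way a single-node test case could provide it is by overriding nodeSettings() with the search.default_search_timeout setting; the sketch below assumes that value from the assertion and is not necessarily how the original test class configures it:

// Assumed node settings that would make the first assertion hold:
@Override
protected Settings nodeSettings() {
    return Settings.builder()
        .put(super.nodeSettings())
        .put("search.default_search_timeout", "5s")
        .build();
}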
Use of org.elasticsearch.indices.IndicesService in project elasticsearch by elastic.
From class SharedClusterSnapshotRestoreIT, method testSnapshotCanceledOnRemovedShard:
/**
* This test ensures that when a shard is removed from a node (perhaps due to the node
* leaving the cluster, then returning), all snapshotting of that shard is aborted, so
* all Store references held onto by the snapshot are released.
*
* See https://github.com/elastic/elasticsearch/issues/20876
*/
public void testSnapshotCanceledOnRemovedShard() throws Exception {
    final int numPrimaries = 1;
    final int numReplicas = 1;
    final int numDocs = 100;
    final String repo = "test-repo";
    final String index = "test-idx";
    final String snapshot = "test-snap";
    assertAcked(prepareCreate(index, 1, Settings.builder()
        .put("number_of_shards", numPrimaries)
        .put("number_of_replicas", numReplicas)));
    logger.info("--> indexing some data");
    for (int i = 0; i < numDocs; i++) {
        index(index, "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    logger.info("--> creating repository");
    PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository(repo)
        .setType("mock")
        .setSettings(Settings.builder()
            .put("location", randomRepoPath())
            .put("random", randomAsciiOfLength(10))
            .put("wait_after_unblock", 200))
        .get();
    assertTrue(putRepositoryResponse.isAcknowledged());
    String blockedNode = blockNodeWithIndex(repo, index);
    logger.info("--> snapshot");
    client().admin().cluster().prepareCreateSnapshot(repo, snapshot).setWaitForCompletion(false).execute();
    logger.info("--> waiting for block to kick in on node [{}]", blockedNode);
    waitForBlock(blockedNode, repo, TimeValue.timeValueSeconds(10));
    logger.info("--> removing primary shard that is being snapshotted");
    ClusterState clusterState = internalCluster().clusterService(internalCluster().getMasterName()).state();
    IndexRoutingTable indexRoutingTable = clusterState.getRoutingTable().index(index);
    String nodeWithPrimary = clusterState.nodes().get(indexRoutingTable.shard(0).primaryShard().currentNodeId()).getName();
    assertNotNull("should be at least one node with a primary shard", nodeWithPrimary);
    IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeWithPrimary);
    IndexService indexService = indicesService.indexService(resolveIndex(index));
    indexService.removeShard(0, "simulate node removal");
    logger.info("--> unblocking blocked node [{}]", blockedNode);
    unblockNode(repo, blockedNode);
    logger.info("--> ensuring snapshot is aborted and the aborted shard was marked as failed");
    SnapshotInfo snapshotInfo = waitForCompletion(repo, snapshot, TimeValue.timeValueSeconds(10));
    assertEquals(1, snapshotInfo.shardFailures().size());
    assertEquals(0, snapshotInfo.shardFailures().get(0).shardId());
    assertEquals("IndexShardSnapshotFailedException[Aborted]", snapshotInfo.shardFailures().get(0).reason());
}
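As a hedged follow-up illustration (not part of the original test), the same per-shard failure information could also be retrieved through the get-snapshots API once the snapshot has completed:

// Illustrative only: fetch the completed snapshot again via the API and re-check the failure count
GetSnapshotsResponse getSnapshotsResponse = client().admin().cluster()
    .prepareGetSnapshots(repo)
    .setSnapshots(snapshot)
    .get();
assertEquals(1, getSnapshotsResponse.getSnapshots().get(0).shardFailures().size());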
Use of org.elasticsearch.indices.IndicesService in project crate by crate.
From class IndexShard, method startRecovery:
public void startRecovery(RecoveryState recoveryState,
                          PeerRecoveryTargetService recoveryTargetService,
                          PeerRecoveryTargetService.RecoveryListener recoveryListener,
                          RepositoriesService repositoriesService,
                          Consumer<MappingMetadata> mappingUpdateConsumer,
                          IndicesService indicesService) {
    assert recoveryState.getRecoverySource().equals(shardRouting.recoverySource());
    switch (recoveryState.getRecoverySource().getType()) {
        case EMPTY_STORE:
        case EXISTING_STORE:
            executeRecovery("from store", recoveryState, recoveryListener, this::recoverFromStore);
            break;
        case PEER:
            try {
                markAsRecovering("from " + recoveryState.getSourceNode(), recoveryState);
                recoveryTargetService.startRecovery(this, recoveryState.getSourceNode(), recoveryListener);
            } catch (Exception e) {
                failShard("corrupted preexisting index", e);
                recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
            }
            break;
        case SNAPSHOT:
            final String repo = ((SnapshotRecoverySource) recoveryState.getRecoverySource()).snapshot().getRepository();
            executeRecovery("from snapshot", recoveryState, recoveryListener,
                l -> restoreFromRepository(repositoriesService.repository(repo), l));
            break;
        case LOCAL_SHARDS:
            final IndexMetadata indexMetadata = indexSettings().getIndexMetadata();
            final Index resizeSourceIndex = indexMetadata.getResizeSourceIndex();
            final List<IndexShard> startedShards = new ArrayList<>();
            final IndexService sourceIndexService = indicesService.indexService(resizeSourceIndex);
            final Set<ShardId> requiredShards;
            final int numShards;
            if (sourceIndexService != null) {
                requiredShards = IndexMetadata.selectRecoverFromShards(shardId().id(),
                    sourceIndexService.getMetadata(), indexMetadata.getNumberOfShards());
                for (IndexShard shard : sourceIndexService) {
                    if (shard.state() == IndexShardState.STARTED && requiredShards.contains(shard.shardId())) {
                        startedShards.add(shard);
                    }
                }
                numShards = requiredShards.size();
            } else {
                numShards = -1;
                requiredShards = Collections.emptySet();
            }
            if (numShards == startedShards.size()) {
                assert requiredShards.isEmpty() == false;
                executeRecovery("from local shards", recoveryState, recoveryListener,
                    l -> recoverFromLocalShards(mappingUpdateConsumer,
                        startedShards.stream().filter(s -> requiredShards.contains(s.shardId())).collect(Collectors.toList()), l));
            } else {
                final RuntimeException e;
                if (numShards == -1) {
                    e = new IndexNotFoundException(resizeSourceIndex);
                } else {
                    e = new IllegalStateException("not all required shards of index " + resizeSourceIndex
                        + " are started yet, expected " + numShards + " found " + startedShards.size()
                        + " can't recover shard " + shardId());
                }
                throw e;
            }
            break;
        default:
            throw new IllegalArgumentException("Unknown recovery source " + recoveryState.getRecoverySource());
    }
}
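For context, a rough sketch of how a caller might drive this method when a newly allocated shard needs to be recovered. The variable names are illustrative and the real call site (the indices cluster state service) differs in its details; the RecoveryState constructor shown takes the shard routing, the local node, and a source node that is null for anything other than peer recovery:

// Hypothetical call site (all local variable names here are illustrative):
RecoveryState recoveryState = new RecoveryState(shardRouting, localNode, sourceNode);
indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener,
    repositoriesService, mappingUpdateConsumer, indicesService);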