Use of org.elasticsearch.cluster.metadata.IndexMetadata in project elasticsearch by elastic. These examples predate the rename to IndexMetadata, so the code below uses the older spelling IndexMetaData.
The class IndicesService, method buildAliasFilter.
public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) {
    /* Being static, parseAliasFilter doesn't have access to whatever guts it needs to parse a query. Instead of passing in a bunch
     * of dependencies we pass in a function that can perform the parsing. */
    CheckedFunction<byte[], QueryBuilder, IOException> filterParser = bytes -> {
        try (XContentParser parser = XContentFactory.xContent(bytes).createParser(xContentRegistry, bytes)) {
            return new QueryParseContext(parser).parseInnerQueryBuilder();
        }
    };
    String[] aliases = indexNameExpressionResolver.filteringAliases(state, index, expressions);
    IndexMetaData indexMetaData = state.metaData().index(index);
    return new AliasFilter(ShardSearchRequest.parseAliasFilter(filterParser, indexMetaData, aliases), aliases);
}
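For orientation, a minimal caller sketch; the indicesService and clusterService instances and the index/alias names are illustrative assumptions, not part of the original example:

    // Hypothetical usage: resolve the filters of any filtered aliases matching the expressions.
    AliasFilter aliasFilter = indicesService.buildAliasFilter(clusterService.state(), "logs-2017.06", "logs_errors");
    QueryBuilder filter = aliasFilter.getQueryBuilder(); // null if no matching alias defines a filter
    String[] matched = aliasFilter.getAliases();         // the alias names that were resolved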
Use of org.elasticsearch.cluster.metadata.IndexMetadata in project elasticsearch by elastic.
The class SyncedFlushService, method attemptSyncedFlush.
/**
 * A utility method to perform a synced flush for all shards of multiple indices.
 * See {@link #attemptSyncedFlush(ShardId, ActionListener)} for more details.
 */
public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener<SyncedFlushResponse> listener) {
    final ClusterState state = clusterService.state();
    final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices);
    final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap();
    int numberOfShards = 0;
    for (Index index : concreteIndices) {
        final IndexMetaData indexMetaData = state.metaData().getIndexSafe(index);
        numberOfShards += indexMetaData.getNumberOfShards();
        results.put(index.getName(), Collections.synchronizedList(new ArrayList<>()));
    }
    if (numberOfShards == 0) {
        listener.onResponse(new SyncedFlushResponse(results));
        return;
    }
    final CountDown countDown = new CountDown(numberOfShards);
    for (final Index concreteIndex : concreteIndices) {
        final String index = concreteIndex.getName();
        final IndexMetaData indexMetaData = state.metaData().getIndexSafe(concreteIndex);
        final int indexNumberOfShards = indexMetaData.getNumberOfShards();
        for (int shard = 0; shard < indexNumberOfShards; shard++) {
            final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard);
            innerAttemptSyncedFlush(shardId, state, new ActionListener<ShardsSyncedFlushResult>() {
                @Override
                public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
                    results.get(index).add(syncedFlushResult);
                    if (countDown.countDown()) {
                        listener.onResponse(new SyncedFlushResponse(results));
                    }
                }

                @Override
                public void onFailure(Exception e) {
                    logger.debug("{} unexpected error while executing synced flush", shardId);
                    final int totalShards = indexMetaData.getNumberOfReplicas() + 1;
                    results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage()));
                    if (countDown.countDown()) {
                        listener.onResponse(new SyncedFlushResponse(results));
                    }
                }
            });
        }
    }
}
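A hedged caller sketch; the syncedFlushService instance and the index pattern are illustrative assumptions:

    // Hypothetical usage: synced-flush every shard of the matching indices, reporting per-index results.
    syncedFlushService.attemptSyncedFlush(new String[] { "logs-*" }, IndicesOptions.lenientExpandOpen(),
            new ActionListener<SyncedFlushResponse>() {
                @Override
                public void onResponse(SyncedFlushResponse response) {
                    // delivered once the CountDown above reaches zero: one result list per index name
                }

                @Override
                public void onFailure(Exception e) {
                    // e.g. index resolution failed before any per-shard flush was attempted
                }
            });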
Use of org.elasticsearch.cluster.metadata.IndexMetadata in project elasticsearch by elastic.
The class StoreRecovery, method recoverFromLocalShards.
boolean recoverFromLocalShards(BiConsumer<String, MappingMetaData> mappingUpdateConsumer, final IndexShard indexShard, final List<LocalShardSnapshot> shards) throws IOException {
    if (canRecover(indexShard)) {
        RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType();
        assert recoveryType == RecoverySource.Type.LOCAL_SHARDS : "expected local shards recovery type: " + recoveryType;
        if (shards.isEmpty()) {
            throw new IllegalArgumentException("shards must not be empty");
        }
        Set<Index> indices = shards.stream().map((s) -> s.getIndex()).collect(Collectors.toSet());
        if (indices.size() > 1) {
            throw new IllegalArgumentException("can't add shards from more than one index");
        }
        IndexMetaData indexMetaData = shards.get(0).getIndexMetaData();
        for (ObjectObjectCursor<String, MappingMetaData> mapping : indexMetaData.getMappings()) {
            mappingUpdateConsumer.accept(mapping.key, mapping.value);
        }
        indexShard.mapperService().merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true);
        return executeRecovery(indexShard, () -> {
            logger.debug("starting recovery from local shards {}", shards);
            try {
                // don't close this directory!!
                final Directory directory = indexShard.store().directory();
                addIndices(indexShard.recoveryState().getIndex(), directory,
                        shards.stream().map(s -> s.getSnapshotDirectory()).collect(Collectors.toList()).toArray(new Directory[shards.size()]));
                internalRecoverFromStore(indexShard);
                // just trigger a merge to do housekeeping on the
                // copied segments - we will also see them in stats etc.
                indexShard.getEngine().forceMerge(false, -1, false, false, false);
            } catch (IOException ex) {
                throw new IndexShardRecoveryException(indexShard.shardId(), "failed to recover from local shards", ex);
            }
        });
    }
    return false;
}
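For context, a hedged sketch of how a caller might supply the mappingUpdateConsumer. In the real codebase the consumer forwards each source-index mapping through the put-mapping API; here it is reduced to an illustrative stub, and the storeRecovery, targetShard, and localShardSnapshots names are assumptions:

    // Illustrative stub: a real consumer would push each (type, mapping) pair to the master,
    // e.g. via client.admin().indices().preparePutMapping(), before the shard copies segments.
    BiConsumer<String, MappingMetaData> mappingUpdateConsumer = (type, mapping) ->
            logger.debug("applying mapping for type [{}] before local shards recovery", type);
    boolean started = storeRecovery.recoverFromLocalShards(mappingUpdateConsumer, targetShard, localShardSnapshots);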
Use of org.elasticsearch.cluster.metadata.IndexMetadata in project elasticsearch by elastic.
The class TransportShardBulkActionTests, method testExecuteBulkDeleteRequest.
public void testExecuteBulkDeleteRequest() throws Exception {
    IndexMetaData metaData = indexMetaData();
    IndexShard shard = newStartedShard(true);
    BulkItemRequest[] items = new BulkItemRequest[1];
    DocWriteRequest writeRequest = new DeleteRequest("index", "type", "id");
    items[0] = new BulkItemRequest(0, writeRequest);
    BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
    Translog.Location location = new Translog.Location(0, 0, 0);
    UpdateHelper updateHelper = null;
    Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest,
            location, 0, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer());
    // Translog changes, even though the document didn't exist
    assertThat(newLocation, not(location));
    BulkItemRequest replicaRequest = bulkShardRequest.items()[0];
    DocWriteRequest replicaDeleteRequest = replicaRequest.request();
    BulkItemResponse primaryResponse = replicaRequest.getPrimaryResponse();
    DeleteResponse response = primaryResponse.getResponse();
    // Any version can be matched on replica
    assertThat(replicaDeleteRequest.version(), equalTo(Versions.MATCH_ANY));
    assertThat(replicaDeleteRequest.versionType(), equalTo(VersionType.INTERNAL));
    assertThat(primaryResponse.getItemId(), equalTo(0));
    assertThat(primaryResponse.getId(), equalTo("id"));
    assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.DELETE));
    assertFalse(primaryResponse.isFailed());
    assertThat(response.getResult(), equalTo(DocWriteResponse.Result.NOT_FOUND));
    assertThat(response.getShardId(), equalTo(shard.shardId()));
    assertThat(response.getIndex(), equalTo("index"));
    assertThat(response.getType(), equalTo("type"));
    assertThat(response.getId(), equalTo("id"));
    assertThat(response.getVersion(), equalTo(1L));
    assertThat(response.getSeqNo(), equalTo(0L));
    assertThat(response.forcedRefresh(), equalTo(false));
    // Now do the same after indexing the document; this time the delete should find and remove it
    indexDoc(shard, "type", "id", "{\"foo\": \"bar\"}");
    writeRequest = new DeleteRequest("index", "type", "id");
    items[0] = new BulkItemRequest(0, writeRequest);
    bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
    location = newLocation;
    newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest,
            location, 0, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer());
    // Translog changes, because the document was deleted
    assertThat(newLocation, not(location));
    replicaRequest = bulkShardRequest.items()[0];
    replicaDeleteRequest = replicaRequest.request();
    primaryResponse = replicaRequest.getPrimaryResponse();
    response = primaryResponse.getResponse();
    // Any version can be matched on replica
    assertThat(replicaDeleteRequest.version(), equalTo(Versions.MATCH_ANY));
    assertThat(replicaDeleteRequest.versionType(), equalTo(VersionType.INTERNAL));
    assertThat(primaryResponse.getItemId(), equalTo(0));
    assertThat(primaryResponse.getId(), equalTo("id"));
    assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.DELETE));
    assertFalse(primaryResponse.isFailed());
    assertThat(response.getResult(), equalTo(DocWriteResponse.Result.DELETED));
    assertThat(response.getShardId(), equalTo(shard.shardId()));
    assertThat(response.getIndex(), equalTo("index"));
    assertThat(response.getType(), equalTo("type"));
    assertThat(response.getId(), equalTo("id"));
    assertThat(response.getVersion(), equalTo(3L));
    assertThat(response.getSeqNo(), equalTo(2L));
    assertThat(response.forcedRefresh(), equalTo(false));
    assertDocCount(shard, 0);
    closeShards(shard);
}
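This test and the next both call an indexMetaData() helper defined elsewhere in TransportShardBulkActionTests. A plausible reconstruction follows; the mapping source and settings are assumptions, shown only to make the snippets self-contained:

    // Hedged sketch of the test helper: single-shard "index" metadata with a trivial "type" mapping.
    private IndexMetaData indexMetaData() throws IOException {
        return IndexMetaData.builder("index")
                .putMapping("type", "{\"properties\":{\"foo\":{\"type\":\"text\"}}}")
                .settings(Settings.builder()
                        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
                .primaryTerm(0, 1)
                .build();
    }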
Use of org.elasticsearch.cluster.metadata.IndexMetadata in project elasticsearch by elastic.
The class TransportShardBulkActionTests, method testExecuteBulkIndexRequest.
public void testExecuteBulkIndexRequest() throws Exception {
    IndexMetaData metaData = indexMetaData();
    IndexShard shard = newStartedShard(true);
    BulkItemRequest[] items = new BulkItemRequest[1];
    boolean create = randomBoolean();
    DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar").create(create);
    BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest);
    items[0] = primaryRequest;
    BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
    Translog.Location location = new Translog.Location(0, 0, 0);
    UpdateHelper updateHelper = null;
    Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest,
            location, 0, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer());
    // Translog should change, since there were no problems
    assertThat(newLocation, not(location));
    BulkItemResponse primaryResponse = bulkShardRequest.items()[0].getPrimaryResponse();
    assertThat(primaryResponse.getItemId(), equalTo(0));
    assertThat(primaryResponse.getId(), equalTo("id"));
    assertThat(primaryResponse.getOpType(), equalTo(create ? DocWriteRequest.OpType.CREATE : DocWriteRequest.OpType.INDEX));
    assertFalse(primaryResponse.isFailed());
    // Assert that the document actually made it there
    assertDocCount(shard, 1);
    writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar").create(true);
    primaryRequest = new BulkItemRequest(0, writeRequest);
    items[0] = primaryRequest;
    bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
    Translog.Location secondLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest,
            newLocation, 0, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer());
    // Translog should not change, since the document was not indexed due to a version conflict
    assertThat(secondLocation, equalTo(newLocation));
    BulkItemRequest replicaRequest = bulkShardRequest.items()[0];
    primaryResponse = bulkShardRequest.items()[0].getPrimaryResponse();
    assertThat(primaryResponse.getItemId(), equalTo(0));
    assertThat(primaryResponse.getId(), equalTo("id"));
    assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.CREATE));
    // Should be failed since the document already exists
    assertTrue(primaryResponse.isFailed());
    BulkItemResponse.Failure failure = primaryResponse.getFailure();
    assertThat(failure.getIndex(), equalTo("index"));
    assertThat(failure.getType(), equalTo("type"));
    assertThat(failure.getId(), equalTo("id"));
    assertThat(failure.getCause().getClass(), equalTo(VersionConflictEngineException.class));
    assertThat(failure.getCause().getMessage(), containsString("version conflict, document already exists (current version [1])"));
    assertThat(failure.getStatus(), equalTo(RestStatus.CONFLICT));
    assertThat(replicaRequest, equalTo(primaryRequest));
    // Assert that the document count is still 1
    assertDocCount(shard, 1);
    closeShards(shard);
}
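A side note on the conflict above: create(true) is shorthand for setting the op type to CREATE, which enforces that the document must not already exist. The second write could equally be built as follows:

    // Equivalent to .create(true): op type CREATE fails with a version conflict if the id exists.
    DocWriteRequest createRequest = new IndexRequest("index", "type", "id")
            .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar")
            .opType(DocWriteRequest.OpType.CREATE);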