Use of org.elasticsearch.index.shard.ShardId in project elasticsearch by elastic.
The class IndicesClusterStateService, method removeShards.
/**
* Removes shards that are currently loaded by indicesService but have disappeared from the routing table of the current node.
* Also removes shards where the recovery source node has changed.
* This method does not delete the shard data.
*
* @param state new cluster state
*/
private void removeShards(final ClusterState state) {
    final RoutingTable routingTable = state.routingTable();
    final DiscoveryNodes nodes = state.nodes();
    final String localNodeId = state.nodes().getLocalNodeId();
    assert localNodeId != null;
    // remove shards based on routing nodes (no deletion of data)
    RoutingNode localRoutingNode = state.getRoutingNodes().node(localNodeId);
    for (AllocatedIndex<? extends Shard> indexService : indicesService) {
        for (Shard shard : indexService) {
            ShardRouting currentRoutingEntry = shard.routingEntry();
            ShardId shardId = currentRoutingEntry.shardId();
            ShardRouting newShardRouting = localRoutingNode == null ? null : localRoutingNode.getByShardId(shardId);
            if (newShardRouting == null) {
                // we can just remove the shard without cleaning it locally, since we will clean it in IndicesStore
                // once all shards are allocated
                logger.debug("{} removing shard (not allocated)", shardId);
                indexService.removeShard(shardId.id(), "removing shard (not allocated)");
            } else if (newShardRouting.isSameAllocation(currentRoutingEntry) == false) {
                logger.debug("{} removing shard (stale allocation id, stale {}, new {})", shardId, currentRoutingEntry, newShardRouting);
                indexService.removeShard(shardId.id(), "removing shard (stale copy)");
            } else if (newShardRouting.initializing() && currentRoutingEntry.active()) {
                // this can happen if the node was isolated/gc-ed, rejoins the cluster and a new shard with the same allocation id
                // is assigned to it. Batch cluster state processing or if shard fetching completes before the node gets a new cluster
                // state may result in a new shard being initialized while having the same allocation id as the currently started shard.
                logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting);
                indexService.removeShard(shardId.id(), "removing shard (stale copy)");
            } else {
                // remove shards where recovery source has changed. This re-initializes shards later in createOrUpdateShards
                if (newShardRouting.recoverySource() != null && newShardRouting.recoverySource().getType() == Type.PEER) {
                    RecoveryState recoveryState = shard.recoveryState();
                    final DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, newShardRouting);
                    if (recoveryState.getSourceNode().equals(sourceNode) == false) {
                        if (recoveryTargetService.cancelRecoveriesForShard(shardId, "recovery source node changed")) {
                            // getting here means that the shard was still recovering
                            logger.debug("{} removing shard (recovery source changed), current [{}], global [{}], shard [{}])",
                                shardId, recoveryState.getSourceNode(), sourceNode, newShardRouting);
                            indexService.removeShard(shardId.id(), "removing shard (recovery source node changed)");
                        }
                    }
                }
            }
        }
    }
}
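Note that removeShard above takes the integer shard number (shardId.id()) rather than the full ShardId, while index lookups elsewhere use shardId.getIndex(). A minimal standalone sketch of that decomposition, using a placeholder index name and UUID (not part of the class above):

```java
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;

public class ShardIdDecompositionExample {
    public static void main(String[] args) {
        // placeholder index name and UUID, for illustration only
        ShardId shardId = new ShardId(new Index("my-index", "some-uuid"), 3);

        System.out.println(shardId.getIndexName());       // "my-index"  -> which index the shard belongs to
        System.out.println(shardId.getIndex().getUUID()); // "some-uuid" -> disambiguates recreated indices with the same name
        System.out.println(shardId.id());                  // 3          -> the shard number passed to indexService.removeShard(...)
    }
}
```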
Use of org.elasticsearch.index.shard.ShardId in project elasticsearch by elastic.
The class IndicesClusterStateService, method createOrUpdateShards.
private void createOrUpdateShards(final ClusterState state) {
    RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
    if (localRoutingNode == null) {
        return;
    }
    DiscoveryNodes nodes = state.nodes();
    RoutingTable routingTable = state.routingTable();
    for (final ShardRouting shardRouting : localRoutingNode) {
        ShardId shardId = shardRouting.shardId();
        if (failedShardsCache.containsKey(shardId) == false) {
            AllocatedIndex<? extends Shard> indexService = indicesService.indexService(shardId.getIndex());
            assert indexService != null : "index " + shardId.getIndex() + " should have been created by createIndices";
            Shard shard = indexService.getShardOrNull(shardId.id());
            if (shard == null) {
                assert shardRouting.initializing() : shardRouting + " should have been removed by failMissingShards";
                createShard(nodes, routingTable, shardRouting, state);
            } else {
                updateShard(nodes, shardRouting, shard, routingTable, state);
            }
        }
    }
}
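createOrUpdateShards only touches shards whose ShardId is absent from failedShardsCache. A hedged sketch of that keying pattern, with a hypothetical stand-in map (the real field maps ShardId to a ShardRouting, not a String):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;

public class FailedShardsCacheSketch {
    // hypothetical stand-in for IndicesClusterStateService#failedShardsCache, keyed by ShardId
    private final Map<ShardId, String> failedShards = new ConcurrentHashMap<>();

    boolean shouldCreateOrUpdate(ShardId shardId) {
        // mirrors the containsKey(shardId) == false guard in createOrUpdateShards
        return failedShards.containsKey(shardId) == false;
    }

    public static void main(String[] args) {
        FailedShardsCacheSketch sketch = new FailedShardsCacheSketch();
        ShardId shardId = new ShardId(new Index("my-index", "some-uuid"), 1);
        sketch.failedShards.put(shardId, "simulated failure");
        System.out.println(sketch.shouldCreateOrUpdate(shardId)); // false: failed shards are skipped
    }
}
```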
Use of org.elasticsearch.index.shard.ShardId in project elasticsearch by elastic.
The class SyncedFlushService, method attemptSyncedFlush.
/**
* a utility method to perform a synced flush for all shards of multiple indices. see {@link #attemptSyncedFlush(ShardId, ActionListener)}
* for more details.
*/
public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions,
                               final ActionListener<SyncedFlushResponse> listener) {
    final ClusterState state = clusterService.state();
    final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices);
    final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap();
    int numberOfShards = 0;
    for (Index index : concreteIndices) {
        final IndexMetaData indexMetaData = state.metaData().getIndexSafe(index);
        numberOfShards += indexMetaData.getNumberOfShards();
        results.put(index.getName(), Collections.synchronizedList(new ArrayList<>()));
    }
    if (numberOfShards == 0) {
        listener.onResponse(new SyncedFlushResponse(results));
        return;
    }
    final CountDown countDown = new CountDown(numberOfShards);
    for (final Index concreteIndex : concreteIndices) {
        final String index = concreteIndex.getName();
        final IndexMetaData indexMetaData = state.metaData().getIndexSafe(concreteIndex);
        final int indexNumberOfShards = indexMetaData.getNumberOfShards();
        for (int shard = 0; shard < indexNumberOfShards; shard++) {
            final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard);
            innerAttemptSyncedFlush(shardId, state, new ActionListener<ShardsSyncedFlushResult>() {

                @Override
                public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
                    results.get(index).add(syncedFlushResult);
                    if (countDown.countDown()) {
                        listener.onResponse(new SyncedFlushResponse(results));
                    }
                }

                @Override
                public void onFailure(Exception e) {
                    logger.debug("{} unexpected error while executing synced flush", shardId);
                    final int totalShards = indexMetaData.getNumberOfReplicas() + 1;
                    results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage()));
                    if (countDown.countDown()) {
                        listener.onResponse(new SyncedFlushResponse(results));
                    }
                }
            });
        }
    }
}
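The fan-out above builds one ShardId per shard number of each concrete index via new ShardId(indexMetaData.getIndex(), shard). A minimal sketch of that enumeration in isolation, with a hypothetical index of three shards:

```java
import java.util.ArrayList;
import java.util.List;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;

public class ShardIdFanOutExample {
    public static void main(String[] args) {
        // hypothetical index with 3 primary shards (name and UUID are placeholders)
        Index index = new Index("logs", "some-uuid");
        int numberOfShards = 3;

        // one ShardId per shard number, exactly as attemptSyncedFlush builds them before fanning out
        List<ShardId> shardIds = new ArrayList<>();
        for (int shard = 0; shard < numberOfShards; shard++) {
            shardIds.add(new ShardId(index, shard));
        }
        System.out.println(shardIds); // e.g. [[logs][0], [logs][1], [logs][2]]
    }
}
```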
Use of org.elasticsearch.index.shard.ShardId in project elasticsearch by elastic.
The class SearchHit, method createFromMap.
public static SearchHit createFromMap(Map<String, Object> values) {
    String id = get(Fields._ID, values, null);
    String type = get(Fields._TYPE, values, null);
    NestedIdentity nestedIdentity = get(NestedIdentity._NESTED, values, null);
    Map<String, SearchHitField> fields = get(Fields.FIELDS, values, null);
    SearchHit searchHit = new SearchHit(-1, id, new Text(type), nestedIdentity, fields);
    searchHit.index = get(Fields._INDEX, values, null);
    searchHit.score(get(Fields._SCORE, values, DEFAULT_SCORE));
    searchHit.version(get(Fields._VERSION, values, -1L));
    searchHit.sortValues(get(Fields.SORT, values, SearchSortValues.EMPTY));
    searchHit.highlightFields(get(Fields.HIGHLIGHT, values, null));
    searchHit.sourceRef(get(SourceFieldMapper.NAME, values, null));
    searchHit.explanation(get(Fields._EXPLANATION, values, null));
    searchHit.setInnerHits(get(Fields.INNER_HITS, values, null));
    List<String> matchedQueries = get(Fields.MATCHED_QUERIES, values, null);
    if (matchedQueries != null) {
        searchHit.matchedQueries(matchedQueries.toArray(new String[matchedQueries.size()]));
    }
    ShardId shardId = get(Fields._SHARD, values, null);
    String nodeId = get(Fields._NODE, values, null);
    if (shardId != null && nodeId != null) {
        searchHit.shard(new SearchShardTarget(nodeId, shardId));
    }
    searchHit.fields(fields);
    return searchHit;
}
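createFromMap only attaches a shard target when both the _shard and _node entries were parsed from the map. A standalone sketch of that final step, using placeholder values for the node id and shard coordinates:

```java
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.SearchShardTarget;

public class SearchShardTargetExample {
    public static void main(String[] args) {
        // placeholder node id and shard id; in createFromMap these come from the parsed _node and _shard entries
        String nodeId = "node-1";
        ShardId shardId = new ShardId(new Index("my-index", "some-uuid"), 2);

        // same constructor call as searchHit.shard(new SearchShardTarget(nodeId, shardId)) above
        SearchShardTarget target = new SearchShardTarget(nodeId, shardId);
        System.out.println(target);
    }
}
```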
Use of org.elasticsearch.index.shard.ShardId in project elasticsearch by elastic.
The class ElasticsearchExceptionTests, method testFailureToAndFromXContentWithDetails.
public void testFailureToAndFromXContentWithDetails() throws IOException {
    final XContent xContent = randomFrom(XContentType.values()).xContent();
    Exception failure;
    Throwable failureCause;
    ElasticsearchException expected;
    ElasticsearchException expectedCause;
    ElasticsearchException suppressed;
    switch (randomIntBetween(0, 6)) {
        case 0: // Simple elasticsearch exception without cause
            failure = new NoNodeAvailableException("A");
            expected = new ElasticsearchException("Elasticsearch exception [type=no_node_available_exception, reason=A]");
            expected.addSuppressed(new ElasticsearchException("Elasticsearch exception [type=no_node_available_exception, reason=A]"));
            break;
        case 1: // Simple elasticsearch exception with headers (other metadata of type number are not parsed)
            failure = new CircuitBreakingException("B", 5_000, 2_000);
            ((ElasticsearchException) failure).addHeader("header_name", "0", "1");
            expected = new ElasticsearchException("Elasticsearch exception [type=circuit_breaking_exception, reason=B]");
            expected.addHeader("header_name", "0", "1");
            suppressed = new ElasticsearchException("Elasticsearch exception [type=circuit_breaking_exception, reason=B]");
            suppressed.addHeader("header_name", "0", "1");
            expected.addSuppressed(suppressed);
            break;
        case 2: // Elasticsearch exception with a cause, headers and parsable metadata
            failureCause = new NullPointerException("var is null");
            failure = new ScriptException("C", failureCause, singletonList("stack"), "test", "painless");
            ((ElasticsearchException) failure).addHeader("script_name", "my_script");
            expectedCause = new ElasticsearchException("Elasticsearch exception [type=null_pointer_exception, reason=var is null]");
            expected = new ElasticsearchException("Elasticsearch exception [type=script_exception, reason=C]", expectedCause);
            expected.addHeader("script_name", "my_script");
            expected.addMetadata("es.lang", "painless");
            expected.addMetadata("es.script", "test");
            expected.addMetadata("es.script_stack", "stack");
            suppressed = new ElasticsearchException("Elasticsearch exception [type=script_exception, reason=C]");
            suppressed.addHeader("script_name", "my_script");
            suppressed.addMetadata("es.lang", "painless");
            suppressed.addMetadata("es.script", "test");
            suppressed.addMetadata("es.script_stack", "stack");
            expected.addSuppressed(suppressed);
            break;
        case 3: // JDK exception without cause
            failure = new IllegalStateException("D");
            expected = new ElasticsearchException("Elasticsearch exception [type=illegal_state_exception, reason=D]");
            suppressed = new ElasticsearchException("Elasticsearch exception [type=illegal_state_exception, reason=D]");
            expected.addSuppressed(suppressed);
            break;
        case 4: // JDK exception with cause
            failureCause = new RoutingMissingException("idx", "type", "id");
            failure = new RuntimeException("E", failureCause);
            expectedCause = new ElasticsearchException("Elasticsearch exception [type=routing_missing_exception, "
                + "reason=routing is required for [idx]/[type]/[id]]");
            expectedCause.addMetadata("es.index", "idx");
            expectedCause.addMetadata("es.index_uuid", "_na_");
            expected = new ElasticsearchException("Elasticsearch exception [type=runtime_exception, reason=E]", expectedCause);
            suppressed = new ElasticsearchException("Elasticsearch exception [type=runtime_exception, reason=E]");
            expected.addSuppressed(suppressed);
            break;
        case 5: // Wrapped exception with cause
            failureCause = new FileAlreadyExistsException("File exists");
            failure = new BroadcastShardOperationFailedException(new ShardId("_index", "_uuid", 5), "F", failureCause);
            expected = new ElasticsearchException("Elasticsearch exception [type=file_already_exists_exception, reason=File exists]");
            // strangely, the wrapped exception appears as the root cause...
            suppressed = new ElasticsearchException("Elasticsearch exception [type=broadcast_shard_operation_failed_exception, "
                + "reason=F]");
            expected.addSuppressed(suppressed);
            break;
        case 6: // SearchPhaseExecutionException with cause and multiple failures
            DiscoveryNode node = new DiscoveryNode("node_g", buildNewFakeTransportAddress(), Version.CURRENT);
            failureCause = new NodeClosedException(node);
            failureCause = new NoShardAvailableActionException(new ShardId("_index_g", "_uuid_g", 6), "node_g", failureCause);
            ShardSearchFailure[] shardFailures = new ShardSearchFailure[] {
                new ShardSearchFailure(new ParsingException(0, 0, "Parsing g", null),
                    new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 61))),
                new ShardSearchFailure(new RepositoryException("repository_g", "Repo"),
                    new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 62))),
                new ShardSearchFailure(new SearchContextMissingException(0L), null) };
            failure = new SearchPhaseExecutionException("phase_g", "G", failureCause, shardFailures);
            expectedCause = new ElasticsearchException("Elasticsearch exception [type=node_closed_exception, "
                + "reason=node closed " + node + "]");
            expectedCause = new ElasticsearchException("Elasticsearch exception [type=no_shard_available_action_exception, "
                + "reason=node_g]", expectedCause);
            expectedCause.addMetadata("es.index", "_index_g");
            expectedCause.addMetadata("es.index_uuid", "_uuid_g");
            expectedCause.addMetadata("es.shard", "6");
            expected = new ElasticsearchException("Elasticsearch exception [type=search_phase_execution_exception, "
                + "reason=G]", expectedCause);
            expected.addMetadata("es.phase", "phase_g");
            expected.addSuppressed(new ElasticsearchException("Elasticsearch exception [type=parsing_exception, reason=Parsing g]"));
            expected.addSuppressed(new ElasticsearchException("Elasticsearch exception [type=repository_exception, "
                + "reason=[repository_g] Repo]"));
            expected.addSuppressed(new ElasticsearchException("Elasticsearch exception [type=search_context_missing_exception, "
                + "reason=No search context found for id [0]]"));
            break;
        default:
            throw new UnsupportedOperationException("Failed to generate randomized failure");
    }
    Exception finalFailure = failure;
    BytesReference failureBytes = XContentHelper.toXContent((builder, params) -> {
        ElasticsearchException.generateFailureXContent(builder, params, finalFailure, true);
        return builder;
    }, xContent.type(), randomBoolean());
    try (XContentParser parser = createParser(xContent, failureBytes)) {
        failureBytes = shuffleXContent(parser, randomBoolean()).bytes();
    }
    ElasticsearchException parsedFailure;
    try (XContentParser parser = createParser(xContent, failureBytes)) {
        assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
        assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
        parsedFailure = ElasticsearchException.failureFromXContent(parser);
        assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
        assertNull(parser.nextToken());
    }
    assertDeepEquals(expected, parsedFailure);
}
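The test spells ShardId two ways: from an index name plus UUID (case 5) and from an Index object (case 6). A small hedged check, outside the test class, that both constructor forms describe the same shard when given the same coordinates:

```java
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;

public class ShardIdConstructorEquivalence {
    public static void main(String[] args) {
        // the two constructor forms used in the test above, applied to the same coordinates
        ShardId fromStrings = new ShardId("_index_g", "_uuid_g", 61);
        ShardId fromIndex = new ShardId(new Index("_index_g", "_uuid_g"), 61);
        System.out.println(fromStrings.equals(fromIndex)); // expected: true
    }
}
```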