use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
the class TransportRecoveryAction method shardOperation.
@Override
protected RecoveryState shardOperation(RecoveryRequest request, ShardRouting shardRouting) {
    IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex());
    IndexShard indexShard = indexService.getShard(shardRouting.shardId().id());
    return indexShard.recoveryState();
}
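For context, a minimal caller-side sketch of how the returned RecoveryState might be inspected. This is not part of the original class; the logger and the request/shardRouting variables are assumed to be in scope:

// Hypothetical usage sketch, not part of TransportRecoveryAction.
RecoveryState state = shardOperation(request, shardRouting);
// RecoveryState exposes the current stage (INIT through DONE) and a timer
// tracking how long the recovery has been running.
logger.info("shard [{}] is in recovery stage [{}] after [{}] ms",
            shardRouting.shardId(), state.getStage(), state.getTimer().time());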
use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
the class PeerRecoverySourceService method recover.
private void recover(StartRecoveryRequest request, ActionListener<RecoveryResponse> listener) {
    final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    final IndexShard shard = indexService.getShard(request.shardId().id());
    final ShardRouting routingEntry = shard.routingEntry();
    if (routingEntry.primary() == false || routingEntry.active() == false) {
        throw new DelayRecoveryException("source shard [" + routingEntry + "] is not an active primary");
    }
    if (request.isPrimaryRelocation() &&
        (routingEntry.relocating() == false || routingEntry.relocatingNodeId().equals(request.targetNode().getId()) == false)) {
        LOGGER.debug("delaying recovery of {} as source shard is not marked yet as relocating to {}",
                     request.shardId(), request.targetNode());
        throw new DelayRecoveryException("source shard is not marked yet as relocating to [" + request.targetNode() + "]");
    }
    RecoverySourceHandler handler = ongoingRecoveries.addNewRecovery(request, shard);
    LOGGER.trace("[{}][{}] starting recovery to {}",
                 request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode());
    handler.recoverToTarget(ActionListener.runAfter(listener, () -> ongoingRecoveries.remove(shard, handler)));
}
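Note that DelayRecoveryException signals a retryable condition rather than a hard failure: the source is telling the caller that the cluster state it needs (an active primary, or the relocation marker) has not arrived yet. A hypothetical sketch of that contract; recoverWithRetry and attemptsLeft are illustrative names, and the real retry handling lives on the recovery target side:

// Hypothetical retry sketch: treat DelayRecoveryException as "try again later".
private void recoverWithRetry(StartRecoveryRequest request,
                              ActionListener<RecoveryResponse> listener,
                              int attemptsLeft) {
    try {
        recover(request, listener);
    } catch (DelayRecoveryException e) {
        if (attemptsLeft > 0) {
            recoverWithRetry(request, listener, attemptsLeft - 1); // immediate retry; a real caller would back off
        } else {
            listener.onFailure(e);
        }
    }
}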
use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
the class SysShardsExpressionsTest method testShardSizeExpressionWhenIndexShardHasBeenClosed.
@Test
public void testShardSizeExpressionWhenIndexShardHasBeenClosed() {
    IndexShard mock = mockIndexShard();
    when(mock.storeStats()).thenThrow(new AlreadyClosedException("shard already closed"));
    ShardReferenceResolver resolver = new ShardReferenceResolver(schemas, new ShardRowContext(mock, clusterService));
    Reference refInfo = refInfo("sys.shards.size", DataTypes.LONG, RowGranularity.SHARD);
    NestableInput<Long> shardSizeExpression = (NestableInput<Long>) resolver.getImplementation(refInfo);
    assertThat(shardSizeExpression.value(), is(0L));
}
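The behavior under test is a defensive pattern: the size expression swallows the AlreadyClosedException thrown by a concurrently closed shard and reports a neutral value instead of failing the whole sys.shards query. A minimal sketch of that pattern; the method and field names are hypothetical and crate's actual implementation may differ:

// Minimal sketch of the defensive pattern under test (names hypothetical).
@Override
public Long value() {
    try {
        return indexShard.storeStats().getSizeInBytes();
    } catch (AlreadyClosedException e) {
        // The shard was closed between routing the query and reading the stats;
        // report 0 instead of failing the whole sys.shards query.
        return 0L;
    }
}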
use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
the class PrimaryAllocationIT method testPrimaryReplicaResyncFailed.
/**
 * This test asserts that replicas that fail to execute resync operations are failed but not marked as stale.
 */
@TestLogging("_root:DEBUG,org.elasticsearch.cluster.routing.allocation:TRACE,org.elasticsearch.cluster.action.shard:TRACE," +
             "org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.cluster.routing.allocation.allocator:TRACE")
@Test
public void testPrimaryReplicaResyncFailed() throws Exception {
    String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
    final int numberOfReplicas = between(2, 3);
    final String oldPrimary = internalCluster().startDataOnlyNode();
    execute("create table t (x string) clustered into 1 shards " +
            "with (number_of_replicas = " + numberOfReplicas + ", \"write.wait_for_active_shards\" = 1)");
    final ShardId shardId = new ShardId(clusterService().state().metadata().index(indexName).getIndex(), 0);
    final Set<String> replicaNodes = new HashSet<>(internalCluster().startDataOnlyNodes(numberOfReplicas));
    ensureGreen();
    execute("SET GLOBAL cluster.routing.allocation.enable = 'none'");
    logger.info("--> Indexing with gap in seqno to ensure that some operations will be replayed in resync");
    long numDocs = scaledRandomIntBetween(5, 50);
    for (int i = 0; i < numDocs; i++) {
        execute("insert into t values ('" + i + "')");
    }
    final IndexShard oldPrimaryShard = internalCluster().getInstance(IndicesService.class, oldPrimary).getShardOrNull(shardId);
    // Make a gap in the sequence numbers: consume a seqno without indexing a
    // document, so the resync has operations to replay.
    EngineTestCase.generateNewSeqNo(IndexShardTestCase.getEngine(oldPrimaryShard));
    long moreDocs = scaledRandomIntBetween(1, 10);
    for (int i = 0; i < moreDocs; i++) {
        execute("insert into t values ('" + (numDocs + i) + "')");
    }
    final Set<String> replicasSide1 = Set.copyOf(randomSubsetOf(between(1, numberOfReplicas - 1), replicaNodes));
    final Set<String> replicasSide2 = Sets.difference(replicaNodes, replicasSide1);
    NetworkDisruption partition = new NetworkDisruption(new TwoPartitions(replicasSide1, replicasSide2), new NetworkDisconnect());
    internalCluster().setDisruptionScheme(partition);
    logger.info("--> isolating some replicas during primary-replica resync");
    partition.startDisrupting();
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(oldPrimary));
    // Check that replicas on one side are failed but not marked as stale.
    assertBusy(() -> {
        ClusterState state = client(master).admin().cluster().prepareState().get().getState();
        final IndexShardRoutingTable shardRoutingTable = state.routingTable().shardRoutingTable(shardId);
        final String newPrimaryNode = state.getRoutingNodes().node(shardRoutingTable.primaryShard().currentNodeId()).node().getName();
        assertThat(newPrimaryNode, not(equalTo(oldPrimary)));
        Set<String> selectedPartition = replicasSide1.contains(newPrimaryNode) ? replicasSide1 : replicasSide2;
        assertThat(shardRoutingTable.activeShards(), hasSize(selectedPartition.size()));
        for (ShardRouting activeShard : shardRoutingTable.activeShards()) {
            assertThat(state.getRoutingNodes().node(activeShard.currentNodeId()).node().getName(), is(in(selectedPartition)));
        }
        assertThat(state.metadata().index(indexName).inSyncAllocationIds(shardId.id()), hasSize(numberOfReplicas + 1));
    }, 1, TimeUnit.MINUTES);
    execute("SET GLOBAL cluster.routing.allocation.enable = 'all'");
    partition.stopDisrupting();
    partition.ensureHealthy(internalCluster());
    logger.info("--> stopped disrupting network and re-enabled allocation");
    assertBusy(() -> {
        ClusterState state = client(master).admin().cluster().prepareState().get().getState();
        assertThat(state.routingTable().shardRoutingTable(shardId).activeShards(), hasSize(numberOfReplicas));
        assertThat(state.metadata().index(indexName).inSyncAllocationIds(shardId.id()), hasSize(numberOfReplicas + 1));
        for (String node : replicaNodes) {
            IndexShard shard = internalCluster().getInstance(IndicesService.class, node).getShardOrNull(shardId);
            assertThat(shard.getLocalCheckpoint(), equalTo(numDocs + moreDocs));
        }
    }, 30, TimeUnit.SECONDS);
    internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex();
}
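The gap created by generateNewSeqNo is what forces the resync to replay operations: the primary's max sequence number runs ahead of the highest document-backed seqno. A hypothetical helper making that precondition explicit; assertSeqNoGap is not part of the original test:

// Hypothetical helper, not in the original test: make the seqno-gap precondition explicit.
private static void assertSeqNoGap(IndexShard primary, long docsIndexedSoFar) {
    SeqNoStats stats = primary.seqNoStats();
    // Sequence numbers are zero-based, so docsIndexedSoFar documents alone would
    // end at docsIndexedSoFar - 1; the extra seqno consumed by generateNewSeqNo
    // pushes the max seq no to at least docsIndexedSoFar.
    assertThat(stats.getMaxSeqNo(), greaterThanOrEqualTo(docsIndexedSoFar));
}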
use of org.elasticsearch.index.shard.IndexShard in project elasticsearch by elastic.
the class SearchService method createSearchContext.
public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) throws IOException {
    IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    IndexShard indexShard = indexService.getShard(request.shardId().getId());
    SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId());
    Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
    final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget,
        engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), timeout, fetchPhase);
    boolean success = false;
    try {
        // We clone the query shard context here just for rewriting; otherwise we
        // might end up with incorrect state, since we use now() or script services
        // during rewrite and when normalizing / evaluating templates etc.
        request.rewrite(new QueryShardContext(searchContext.getQueryShardContext()));
        assert searchContext.getQueryShardContext().isCachable();
        success = true;
    } finally {
        if (success == false) {
            IOUtils.closeWhileHandlingException(searchContext);
        }
    }
    return searchContext;
}
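Note the success flag: if the rewrite throws, the context (and the engine searcher it owns) is released before the exception propagates; if it succeeds, ownership passes to the caller, who must close the context. A hypothetical caller-side sketch of that contract; the request variable, the timeout value, and the phase logic are assumptions:

// Hypothetical caller sketch: once createSearchContext returns, the caller owns
// the context and must close it, e.g. via try-with-resources (it is Releasable).
try (DefaultSearchContext context = createSearchContext(request, TimeValue.timeValueSeconds(30), null)) {
    // ... run the query / fetch phases against the context ...
}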