Use of org.elasticsearch.cluster.routing.IndexShardRoutingTable in project elasticsearch by elastic.
From the class InSyncAllocationIdTests, method testPrimaryFailureBatchedWithReplicaFailure.
/**
 * Assume the following scenario: an indexing request is written to the primary, but fails to be replicated to the active replica.
 * The primary instructs the master to fail the replica before acknowledging the write to the client. In the meantime, the primary
 * fails for an unrelated reason. The master now batches both requests, failing primary and replica together. We have to make sure
 * that only the allocation id of the primary is kept in the in-sync allocation set before we acknowledge the request to the client.
 * Otherwise we would acknowledge a write that made it into the primary but not into the replica, while the replica is still
 * considered non-stale.
 */
public void testPrimaryFailureBatchedWithReplicaFailure() throws Exception {
    ClusterState clusterState = createOnePrimaryOneReplicaClusterState(allocation);
    IndexShardRoutingTable shardRoutingTable = clusterState.routingTable().index("test").shard(0);
    ShardRouting primaryShard = shardRoutingTable.primaryShard();
    ShardRouting replicaShard = shardRoutingTable.replicaShards().get(0);
    long primaryTerm = clusterState.metaData().index("test").primaryTerm(0);
    List<ShardEntry> failureEntries = new ArrayList<>();
    failureEntries.add(new ShardEntry(shardRoutingTable.shardId(), primaryShard.allocationId().getId(), 0L, "dummy", null));
    failureEntries.add(new ShardEntry(shardRoutingTable.shardId(), replicaShard.allocationId().getId(), primaryTerm, "dummy", null));
    Collections.shuffle(failureEntries, random());
    logger.info("Failing {}", failureEntries);
    clusterState = failedClusterStateTaskExecutor.execute(clusterState, failureEntries).resultingState;
    assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0),
        equalTo(Collections.singleton(primaryShard.allocationId().getId())));
    // resend shard failures to check if they are ignored
    clusterState = failedClusterStateTaskExecutor.execute(clusterState, failureEntries).resultingState;
    assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0),
        equalTo(Collections.singleton(primaryShard.allocationId().getId())));
}
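For reference, the core of the assertion above can be factored into a small helper that relates the routing table to the in-sync allocation id set tracked in the index metadata. This is an illustrative sketch, not part of the original test; the helper name assertOnlyPrimaryInSync is made up here, and the body only uses calls that already appear in the snippet.

private static void assertOnlyPrimaryInSync(ClusterState clusterState, String index) {
    // look up the primary's allocation id for shard 0 of the given index
    IndexShardRoutingTable shardRoutingTable = clusterState.routingTable().index(index).shard(0);
    String primaryAllocationId = shardRoutingTable.primaryShard().allocationId().getId();
    // after the batched failures, only the primary may remain in the in-sync allocation set
    Set<String> inSyncIds = clusterState.metaData().index(index).inSyncAllocationIds(0);
    assertThat(inSyncIds, equalTo(Collections.singleton(primaryAllocationId)));
}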
Use of org.elasticsearch.cluster.routing.IndexShardRoutingTable in project elasticsearch by elastic.
From the class DecisionsImpactOnClusterHealthTests, method testPrimaryShardYesDecisionOnIndexCreation.
public void testPrimaryShardYesDecisionOnIndexCreation() throws IOException {
    final String indexName = "test-idx";
    Settings settings = Settings.builder()
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
        .build();
    AllocationDecider decider = new TestAllocateDecision(Decision.YES) {
        @Override
        public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
            if (node.getByShardId(shardRouting.shardId()) == null) {
                return Decision.YES;
            } else {
                return Decision.NO;
            }
        }
    };
    // if deciders say YES to allocating primary shards, the cluster stays in the YELLOW state
    ClusterState clusterState = runAllocationTest(settings, indexName, Collections.singleton(decider), ClusterHealthStatus.YELLOW);
    // make sure the primaries are initializing
    RoutingTable routingTable = clusterState.routingTable();
    for (IndexShardRoutingTable indexShardRoutingTable : routingTable.index(indexName)) {
        assertTrue(indexShardRoutingTable.primaryShard().initializing());
    }
}
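The same routing table can also be used to look at the replica copies of each shard. The check below is an illustrative sketch only, based on the assumption that, with the primaries still initializing, no replica has been started yet; it is not part of the original test.

for (IndexShardRoutingTable indexShardRoutingTable : routingTable.index(indexName)) {
    for (ShardRouting replica : indexShardRoutingTable.replicaShards()) {
        // assumption: replicas only recover once their primary is active, so none can have started here
        assertFalse(replica.started());
    }
}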
Use of org.elasticsearch.cluster.routing.IndexShardRoutingTable in project elasticsearch by elastic.
From the class ActiveShardCountTests, method startWaitOnShards.
private ClusterState startWaitOnShards(final ClusterState clusterState, final String indexName, final int numShardsToStart) {
    RoutingTable routingTable = clusterState.routingTable();
    IndexRoutingTable indexRoutingTable = routingTable.index(indexName);
    IndexRoutingTable.Builder newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
    for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
        final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
        assert shardRoutingTable.getSize() > 2;
        int numToStart = numShardsToStart;
        for (ShardRouting shardRouting : shardRoutingTable.getShards()) {
            if (shardRouting.primary()) {
                assertTrue(shardRouting.active());
            } else {
                if (shardRouting.active() == false) {
                    if (numToStart > 0) {
                        shardRouting = shardRouting.initialize(randomAsciiOfLength(8), null, shardRouting.getExpectedShardSize())
                            .moveToStarted();
                        numToStart--;
                    }
                } else {
                    numToStart--;
                }
            }
            newIndexRoutingTable.addShard(shardRouting);
        }
    }
    routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
    return ClusterState.builder(clusterState).routingTable(routingTable).build();
}
Use of org.elasticsearch.cluster.routing.IndexShardRoutingTable in project elasticsearch by elastic.
From the class CorruptedFileIT, method testCorruptionOnNetworkLayer.
/**
 * Tests corruption that happens on the network layer and verifies that the primary does not get affected by corruption that
 * happens on the way to the replica. The file on disk stays uncorrupted.
 */
public void testCorruptionOnNetworkLayer() throws ExecutionException, InterruptedException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    internalCluster().ensureAtLeastNumDataNodes(2);
    if (cluster().numDataNodes() < 3) {
        internalCluster().startNode(Settings.builder()
            .put(Node.NODE_DATA_SETTING.getKey(), true)
            .put(Node.NODE_MASTER_SETTING.getKey(), false));
    }
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    List<NodeStats> dataNodeStats = new ArrayList<>();
    for (NodeStats stat : nodeStats.getNodes()) {
        if (stat.getNode().isDataNode()) {
            dataNodeStats.add(stat);
        }
    }
    assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
    Collections.shuffle(dataNodeStats, random());
    NodeStats primariesNode = dataNodeStats.get(0);
    NodeStats unluckyNode = dataNodeStats.get(1);
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
        // don't go crazy here, it must recover fast
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, between(1, 4))
        .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
        .put("index.routing.allocation.include._name", primariesNode.getNode().getName())
        .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)));
    ensureGreen();
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    // we have to flush at least once here since we don't corrupt the translog
    assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    final boolean truncate = randomBoolean();
    for (NodeStats dataNode : dataNodeStats) {
        MockTransportService mockTransportService =
            ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
        mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
            new MockTransportService.DelegateTransport(mockTransportService.original()) {

                @Override
                protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request,
                                           TransportRequestOptions options) throws IOException {
                    if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
                        RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
                        if (truncate && req.length() > 1) {
                            BytesRef bytesRef = req.content().toBytesRef();
                            BytesArray array = new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1);
                            request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(),
                                array, req.lastChunk(), req.totalTranslogOps(), req.sourceThrottleTimeInNanos());
                        } else {
                            assert req.content().toBytesRef().bytes == req.content().toBytesRef().bytes : "no internal reference!!";
                            final byte[] array = req.content().toBytesRef().bytes;
                            int i = randomIntBetween(0, req.content().length() - 1);
                            // flip one byte in the content
                            array[i] = (byte) ~array[i];
                        }
                    }
                    super.sendRequest(connection, requestId, action, request, options);
                }
            });
    }
    Settings build = Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
        .put("index.routing.allocation.include._name", "*")
        .build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    client().admin().cluster().prepareReroute().get();
    ClusterHealthResponse actionGet = client().admin().cluster().health(Requests.clusterHealthRequest("test").waitForGreenStatus()).actionGet();
    if (actionGet.isTimedOut()) {
        logger.info("ensureGreen timed out, cluster state:\n{}\n{}",
            client().admin().cluster().prepareState().get().getState(),
            client().admin().cluster().preparePendingClusterTasks().get());
        assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
    }
    // we are green, so the primaries did not get corrupted.
    // ensure that no shard is actually allocated on the unlucky node
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
    for (IndexShardRoutingTable table : clusterStateResponse.getState().getRoutingTable().index("test")) {
        for (ShardRouting routing : table) {
            if (unluckyNode.getNode().getId().equals(routing.currentNodeId())) {
                assertThat(routing.state(), not(equalTo(ShardRoutingState.STARTED)));
                assertThat(routing.state(), not(equalTo(ShardRoutingState.RELOCATING)));
            }
        }
    }
    final int numIterations = scaledRandomIntBetween(5, 20);
    for (int i = 0; i < numIterations; i++) {
        SearchResponse response = client().prepareSearch().setSize(numDocs).get();
        assertHitCount(response, numDocs);
    }
}
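The routing table walk at the end of the test generalizes to a small helper that asserts no copy of an index has been started on a given node. The following is an illustrative sketch built only from the calls already used above; the helper name assertNoStartedShardOnNode is not from the original code.

private static void assertNoStartedShardOnNode(ClusterState state, String index, String nodeId) {
    for (IndexShardRoutingTable table : state.getRoutingTable().index(index)) {
        for (ShardRouting routing : table) {
            if (nodeId.equals(routing.currentNodeId())) {
                // a corrupted recovery may leave a copy behind, but it must never reach STARTED or RELOCATING
                assertThat(routing.state(), not(equalTo(ShardRoutingState.STARTED)));
                assertThat(routing.state(), not(equalTo(ShardRoutingState.RELOCATING)));
            }
        }
    }
}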
Use of org.elasticsearch.cluster.routing.IndexShardRoutingTable in project elasticsearch by elastic.
From the class ClusterStateDiffIT, method randomChangeToIndexRoutingTable.
/**
 * Randomly updates an index routing table in the cluster state.
 */
private IndexRoutingTable randomChangeToIndexRoutingTable(IndexRoutingTable original, String[] nodes) {
    IndexRoutingTable.Builder builder = IndexRoutingTable.builder(original.getIndex());
    for (ObjectCursor<IndexShardRoutingTable> indexShardRoutingTable : original.shards().values()) {
        Set<String> availableNodes = Sets.newHashSet(nodes);
        for (ShardRouting shardRouting : indexShardRoutingTable.value.shards()) {
            availableNodes.remove(shardRouting.currentNodeId());
            if (shardRouting.relocating()) {
                availableNodes.remove(shardRouting.relocatingNodeId());
            }
        }
        for (ShardRouting shardRouting : indexShardRoutingTable.value.shards()) {
            final ShardRouting updatedShardRouting = randomChange(shardRouting, availableNodes);
            availableNodes.remove(updatedShardRouting.currentNodeId());
            if (shardRouting.relocating()) {
                availableNodes.remove(updatedShardRouting.relocatingNodeId());
            }
            builder.addShard(updatedShardRouting);
        }
    }
    return builder.build();
}
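Usage mirrors the routing table rebuild in startWaitOnShards above: the returned IndexRoutingTable is added to a RoutingTable builder and installed into a new ClusterState. A minimal sketch, assuming clusterState, indexRoutingTable, and nodes are in scope (these names are illustrative, not from the original test):

IndexRoutingTable updatedIndexRoutingTable = randomChangeToIndexRoutingTable(indexRoutingTable, nodes);
RoutingTable updatedRoutingTable = RoutingTable.builder(clusterState.routingTable()).add(updatedIndexRoutingTable).build();
ClusterState updatedClusterState = ClusterState.builder(clusterState).routingTable(updatedRoutingTable).build();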