Use of org.elasticsearch.cluster.routing.IndexShardRoutingTable in project elasticsearch by elastic.
The class TransportWriteActionTests, method testReplicaProxy.
public void testReplicaProxy() throws InterruptedException, ExecutionException {
    CapturingTransport transport = new CapturingTransport();
    TransportService transportService = new TransportService(clusterService.getSettings(), transport, threadPool,
        TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null);
    transportService.start();
    transportService.acceptIncomingRequests();
    ShardStateAction shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool);
    TestAction action = new TestAction(Settings.EMPTY, "testAction", transportService, clusterService, shardStateAction, threadPool);
    ReplicationOperation.Replicas proxy = action.newReplicasProxy();
    final String index = "test";
    final ShardId shardId = new ShardId(index, "_na_", 0);
    ClusterState state = ClusterStateCreationUtils.stateWithActivePrimary(index, true, 1 + randomInt(3), randomInt(2));
    logger.info("using state: {}", state);
    ClusterServiceUtils.setState(clusterService, state);
    // check that an unknown node fails
    PlainActionFuture<ReplicaResponse> listener = new PlainActionFuture<>();
    proxy.performOn(TestShardRouting.newShardRouting(shardId, "NOT THERE", false, randomFrom(ShardRoutingState.values())),
        new TestRequest(), listener);
    assertTrue(listener.isDone());
    assertListenerThrows("non existent node should throw a NoNodeAvailableException", listener, NoNodeAvailableException.class);
    final IndexShardRoutingTable shardRoutings = state.routingTable().shardRoutingTable(shardId);
    final ShardRouting replica = randomFrom(shardRoutings.replicaShards().stream()
        .filter(ShardRouting::assignedToNode).collect(Collectors.toList()));
    listener = new PlainActionFuture<>();
    proxy.performOn(replica, new TestRequest(), listener);
    assertFalse(listener.isDone());
    CapturingTransport.CapturedRequest[] captures = transport.getCapturedRequestsAndClear();
    assertThat(captures, arrayWithSize(1));
    if (randomBoolean()) {
        final TransportReplicationAction.ReplicaResponse response =
            new TransportReplicationAction.ReplicaResponse(randomAsciiOfLength(10), randomLong());
        transport.handleResponse(captures[0].requestId, response);
        assertTrue(listener.isDone());
        assertThat(listener.get(), equalTo(response));
    } else if (randomBoolean()) {
        transport.handleRemoteError(captures[0].requestId, new ElasticsearchException("simulated"));
        assertTrue(listener.isDone());
        assertListenerThrows("listener should reflect remote error", listener, ElasticsearchException.class);
    } else {
        transport.handleError(captures[0].requestId, new TransportException("simulated"));
        assertTrue(listener.isDone());
        assertListenerThrows("listener should reflect the transport error", listener, TransportException.class);
    }
    AtomicReference<Object> failure = new AtomicReference<>();
    AtomicReference<Object> ignoredFailure = new AtomicReference<>();
    AtomicBoolean success = new AtomicBoolean();
    proxy.failShardIfNeeded(replica, randomIntBetween(1, 10), "test", new ElasticsearchException("simulated"),
        () -> success.set(true), failure::set, ignoredFailure::set);
    CapturingTransport.CapturedRequest[] shardFailedRequests = transport.getCapturedRequestsAndClear();
    // a write replication action proxy should fail the shard
    assertEquals(1, shardFailedRequests.length);
    CapturingTransport.CapturedRequest shardFailedRequest = shardFailedRequests[0];
    ShardStateAction.ShardEntry shardEntry = (ShardStateAction.ShardEntry) shardFailedRequest.request;
    // the shard the request was sent to and the shard to be failed should be the same
    assertEquals(shardEntry.getShardId(), replica.shardId());
    assertEquals(shardEntry.getAllocationId(), replica.allocationId().getId());
    if (randomBoolean()) {
        // simulate success
        transport.handleResponse(shardFailedRequest.requestId, TransportResponse.Empty.INSTANCE);
        assertTrue(success.get());
        assertNull(failure.get());
        assertNull(ignoredFailure.get());
    } else if (randomBoolean()) {
        // simulate that the primary has been demoted
        transport.handleRemoteError(shardFailedRequest.requestId,
            new ShardStateAction.NoLongerPrimaryShardException(replica.shardId(), "shard-failed-test"));
        assertFalse(success.get());
        assertNotNull(failure.get());
        assertNull(ignoredFailure.get());
    } else {
        // simulate an "ignored" exception
        transport.handleRemoteError(shardFailedRequest.requestId, new NodeClosedException(state.nodes().getLocalNode()));
        assertFalse(success.get());
        assertNull(failure.get());
        assertNotNull(ignoredFailure.get());
    }
}
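The test drives the exchange manually: CapturingTransport records each outbound request so the test can later complete it with a success, a remote error, or a local transport error, and then assert on the listener. A minimal sketch of this capture-and-complete idea in plain Java, with CompletableFuture standing in for the Elasticsearch listener and transport types (all names below are hypothetical, not part of the Elasticsearch test harness):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

class CapturingSender {
    // each "sent" request is recorded here instead of going over the wire
    final List<CompletableFuture<String>> captured = new ArrayList<>();

    CompletableFuture<String> send(String request) {
        CompletableFuture<String> future = new CompletableFuture<>();
        captured.add(future);
        return future;
    }
}

public class CaptureAndCompleteDemo {
    public static void main(String[] args) {
        CapturingSender sender = new CapturingSender();
        CompletableFuture<String> listener = sender.send("replica-write");
        // nothing has answered yet, mirroring assertFalse(listener.isDone())
        System.out.println("done before reply: " + listener.isDone()); // false
        // the test decides how the exchange ends, like transport.handleResponse(requestId, response)
        sender.captured.get(0).complete("ok");
        System.out.println("done after reply: " + listener.isDone()); // true
    }
}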
Use of org.elasticsearch.cluster.routing.IndexShardRoutingTable in project elasticsearch by elastic.
The class BalanceUnbalancedClusterTests, method allocateNew.
@Override
protected ClusterState allocateNew(ClusterState state) {
    String index = "tweets-2014-12-29:00";
    AllocationService strategy = createAllocationService(Settings.builder().build());
    MetaData metaData = MetaData.builder(state.metaData())
        .put(IndexMetaData.builder(index).settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
        .build();
    RoutingTable initialRoutingTable = RoutingTable.builder(state.routingTable()).addAsNew(metaData.index(index)).build();
    ClusterState clusterState = ClusterState.builder(state).metaData(metaData).routingTable(initialRoutingTable).build();
    clusterState = strategy.reroute(clusterState, "reroute");
    while (true) {
        if (clusterState.routingTable().shardsWithState(INITIALIZING).isEmpty()) {
            break;
        }
        clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
    }
    Map<String, Integer> counts = new HashMap<>();
    for (IndexShardRoutingTable table : clusterState.routingTable().index(index)) {
        for (ShardRouting r : table) {
            String s = r.currentNodeId();
            Integer count = counts.get(s);
            if (count == null) {
                count = 0;
            }
            count++;
            counts.put(s, count);
        }
    }
    for (Map.Entry<String, Integer> count : counts.entrySet()) {
        // we have 10 shards and 4 nodes, so 2 nodes have 3 shards and 2 nodes have 2 shards
        assertTrue("Node: " + count.getKey() + " has shard mismatch: " + count.getValue(), count.getValue() >= 2);
        assertTrue("Node: " + count.getKey() + " has shard mismatch: " + count.getValue(), count.getValue() <= 3);
    }
    return clusterState;
}
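The per-node tally above uses an explicit null check; since Java 8 the same count can be written with Map.merge. A self-contained sketch of the equivalent tally (the node ids are made up for illustration):

import java.util.HashMap;
import java.util.Map;

public class ShardCountDemo {
    public static void main(String[] args) {
        // stand-ins for the ShardRouting.currentNodeId() values seen in the loop
        String[] nodeIds = {"node1", "node2", "node1", "node3", "node2", "node1"};
        Map<String, Integer> counts = new HashMap<>();
        for (String nodeId : nodeIds) {
            counts.merge(nodeId, 1, Integer::sum); // replaces the get/null-check/put sequence
        }
        System.out.println(counts); // e.g. {node1=3, node2=2, node3=1} (iteration order not guaranteed)
    }
}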
Use of org.elasticsearch.cluster.routing.IndexShardRoutingTable in project elasticsearch by elastic.
The class SyncedFlushSingleNodeTests, method testModificationPreventsFlushing.
public void testModificationPreventsFlushing() throws InterruptedException {
    createIndex("test");
    client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
    IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
    IndexShard shard = test.getShardOrNull(0);
    SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
    final ShardId shardId = shard.shardId();
    final ClusterState state = getInstanceFromNode(ClusterService.class).state();
    final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
    final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
    assertEquals("exactly one active shard", 1, activeShards.size());
    Map<String, Engine.CommitId> commitIds = SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
    assertEquals("exactly one commit id", 1, commitIds.size());
    client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON).get();
    String syncId = UUIDs.base64UUID();
    SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
    flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
    listener.latch.await();
    assertNull(listener.error);
    ShardsSyncedFlushResult syncedFlushResult = listener.result;
    assertNotNull(syncedFlushResult);
    assertEquals(0, syncedFlushResult.successfulShards());
    assertEquals(1, syncedFlushResult.totalShards());
    assertEquals(syncId, syncedFlushResult.syncId());
    assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
    assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
    assertEquals("pending operations", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
    // pull another commit and make sure we can't sync-flush with the old one
    SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
    listener = new SyncedFlushUtil.LatchedListener<>();
    flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
    listener.latch.await();
    assertNull(listener.error);
    syncedFlushResult = listener.result;
    assertNotNull(syncedFlushResult);
    assertEquals(0, syncedFlushResult.successfulShards());
    assertEquals(1, syncedFlushResult.totalShards());
    assertEquals(syncId, syncedFlushResult.syncId());
    assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
    assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
    assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
}
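SyncedFlushUtil.LatchedListener blocks the test thread until the asynchronous flush round reports back. The underlying pattern is a latch plus mutable result fields; here is a minimal sketch, assuming a callback-style listener with onResponse/onFailure methods (names modeled loosely on Elasticsearch's ActionListener, not the actual helper's source):

import java.util.concurrent.CountDownLatch;

// a latch-backed listener: the callback thread stores the outcome and
// releases the latch, while the test thread awaits it
class LatchedListener<T> {
    final CountDownLatch latch = new CountDownLatch(1);
    volatile T result;
    volatile Exception error;

    public void onResponse(T response) {
        result = response;
        latch.countDown();
    }

    public void onFailure(Exception e) {
        error = e;
        latch.countDown();
    }
}

The caller then invokes listener.latch.await() and inspects result or error, exactly as the test does above.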
Use of org.elasticsearch.cluster.routing.IndexShardRoutingTable in project elasticsearch by elastic.
The class AbstractIndicesClusterStateServiceTestCase, method assertClusterStateMatchesNodeState.
/**
 * Checks that the cluster state matches the internal state of the given IndicesClusterStateService instance.
 *
 * @param state cluster state used for matching
 * @param indicesClusterStateService the service whose internal state is compared against {@code state}
 */
public void assertClusterStateMatchesNodeState(ClusterState state, IndicesClusterStateService indicesClusterStateService) {
    MockIndicesService indicesService = (MockIndicesService) indicesClusterStateService.indicesService;
    ConcurrentMap<ShardId, ShardRouting> failedShardsCache = indicesClusterStateService.failedShardsCache;
    RoutingNode localRoutingNode = state.getRoutingNodes().node(state.getNodes().getLocalNodeId());
    if (localRoutingNode != null) {
        if (enableRandomFailures == false) {
            assertThat("failed shard cache should be empty", failedShardsCache.values(), empty());
        }
        // check that all shards in the local routing node have been allocated
        for (ShardRouting shardRouting : localRoutingNode) {
            Index index = shardRouting.index();
            IndexMetaData indexMetaData = state.metaData().getIndexSafe(index);
            MockIndexShard shard = indicesService.getShardOrNull(shardRouting.shardId());
            ShardRouting failedShard = failedShardsCache.get(shardRouting.shardId());
            if (enableRandomFailures) {
                if (shard == null && failedShard == null) {
                    fail("Shard with id " + shardRouting + " expected but missing in indicesService and failedShardsCache");
                }
                if (failedShard != null && failedShard.isSameAllocation(shardRouting) == false) {
                    fail("Shard cache has not been properly cleaned for " + failedShard);
                }
            } else {
                if (shard == null) {
                    fail("Shard with id " + shardRouting + " expected but missing in indicesService");
                }
            }
            if (shard != null) {
                AllocatedIndex<? extends Shard> indexService = indicesService.indexService(index);
                assertTrue("Index " + index + " expected but missing in indicesService", indexService != null);
                // index metadata has been updated
                assertThat(indexService.getIndexSettings().getIndexMetaData(), equalTo(indexMetaData));
                // shard has been created
                if (enableRandomFailures == false || failedShard == null) {
                    assertTrue("Shard with id " + shardRouting + " expected but missing in indexService", shard != null);
                    // shard has the latest shard routing
                    assertThat(shard.routingEntry(), equalTo(shardRouting));
                }
                if (shard.routingEntry().primary() && shard.routingEntry().active()) {
                    IndexShardRoutingTable shardRoutingTable = state.routingTable().shardRoutingTable(shard.shardId());
                    Set<String> activeIds = shardRoutingTable.activeShards().stream()
                        .map(r -> r.allocationId().getId()).collect(Collectors.toSet());
                    Set<String> initializingIds = shardRoutingTable.getAllInitializingShards().stream()
                        .map(r -> r.allocationId().getId()).collect(Collectors.toSet());
                    assertThat(shard.routingEntry() + " isn't updated with active aIDs", shard.activeAllocationIds, equalTo(activeIds));
                    assertThat(shard.routingEntry() + " isn't updated with init aIDs", shard.initializingAllocationIds, equalTo(initializingIds));
                }
            }
        }
    }
    // all other shards / indices have been cleaned up
    for (AllocatedIndex<? extends Shard> indexService : indicesService) {
        assertTrue(state.metaData().getIndexSafe(indexService.index()) != null);
        boolean shardsFound = false;
        for (Shard shard : indexService) {
            shardsFound = true;
            ShardRouting persistedShardRouting = shard.routingEntry();
            ShardRouting shardRouting = localRoutingNode.getByShardId(persistedShardRouting.shardId());
            if (shardRouting == null) {
                fail("Shard with id " + persistedShardRouting + " locally exists but missing in routing table");
            }
            if (shardRouting.equals(persistedShardRouting) == false) {
                fail("Local shard " + persistedShardRouting + " has stale routing " + shardRouting);
            }
        }
        if (shardsFound == false) {
            if (enableRandomFailures) {
                // check if we have shards of that index in failedShardsCache;
                // if yes, we might not have cleaned the index, as failedShardsCache can be populated by another thread
                assertFalse(failedShardsCache.keySet().stream().noneMatch(shardId -> shardId.getIndex().equals(indexService.index())));
            } else {
                fail("index service for index " + indexService.index() + " has no shards");
            }
        }
    }
}
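Stripped of the failure-injection branches, the helper enforces a two-way invariant: every shard the routing table assigns to the local node must exist locally with identical routing, and every locally allocated shard must still appear in the routing table. A minimal sketch of that bidirectional check over plain maps (types and names are illustrative only, not the Elasticsearch API):

import java.util.Map;

public class ConsistencyCheckDemo {
    // expected: shardId -> routing according to the cluster state
    // actual:   shardId -> routing held by the local node
    static void assertConsistent(Map<String, String> expected, Map<String, String> actual) {
        for (Map.Entry<String, String> e : expected.entrySet()) {
            String local = actual.get(e.getKey());
            if (local == null) {
                throw new AssertionError("shard " + e.getKey() + " expected but missing locally");
            }
            if (!local.equals(e.getValue())) {
                throw new AssertionError("shard " + e.getKey() + " has stale routing " + local);
            }
        }
        for (String shardId : actual.keySet()) {
            if (!expected.containsKey(shardId)) {
                throw new AssertionError("shard " + shardId + " exists locally but not in the routing table");
            }
        }
    }

    public static void main(String[] args) {
        assertConsistent(Map.of("test[0]", "STARTED"), Map.of("test[0]", "STARTED")); // passes
    }
}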
Use of org.elasticsearch.cluster.routing.IndexShardRoutingTable in project elasticsearch by elastic.
The class SyncedFlushSingleNodeTests, method testFailAfterIntermediateCommit.
public void testFailAfterIntermediateCommit() throws InterruptedException {
    createIndex("test");
    client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
    IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
    IndexShard shard = test.getShardOrNull(0);
    SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
    final ShardId shardId = shard.shardId();
    final ClusterState state = getInstanceFromNode(ClusterService.class).state();
    final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
    final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
    assertEquals("exactly one active shard", 1, activeShards.size());
    Map<String, Engine.CommitId> commitIds = SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
    assertEquals("exactly one commit id", 1, commitIds.size());
    if (randomBoolean()) {
        client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON).get();
    }
    client().admin().indices().prepareFlush("test").setForce(true).get();
    String syncId = UUIDs.base64UUID();
    final SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
    flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
    listener.latch.await();
    assertNull(listener.error);
    ShardsSyncedFlushResult syncedFlushResult = listener.result;
    assertNotNull(syncedFlushResult);
    assertEquals(0, syncedFlushResult.successfulShards());
    assertEquals(1, syncedFlushResult.totalShards());
    assertEquals(syncId, syncedFlushResult.syncId());
    assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
    assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
    assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
}
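This test differs from testModificationPreventsFlushing above only in how the pre-sync commit id is invalidated: instead of leaving an indexing operation pending, it forces a flush, which writes a new Lucene commit. The sync request then carries a stale commit id and is rejected with "commit has changed" rather than "pending operations".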