Use of org.elasticsearch.cluster.routing.ShardRouting in project elasticsearch by elastic.
The class IndicesClusterStateServiceRandomUpdatesTests, method randomlyUpdateClusterState.
public ClusterState randomlyUpdateClusterState(ClusterState state,
                                               Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap,
                                               Supplier<MockIndicesService> indicesServiceSupplier) {
    // randomly create new indices (until we have 200 max)
    for (int i = 0; i < randomInt(5); i++) {
        if (state.metaData().indices().size() > 200) {
            break;
        }
        String name = "index_" + randomAsciiOfLength(15).toLowerCase(Locale.ROOT);
        Settings.Builder settingsBuilder = Settings.builder()
            .put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3))
            .put(SETTING_NUMBER_OF_REPLICAS, randomInt(2));
        if (randomBoolean()) {
            settingsBuilder.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true);
        }
        CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build())
            .waitForActiveShards(ActiveShardCount.NONE);
        state = cluster.createIndex(state, request);
        assertTrue(state.metaData().hasIndex(name));
    }
    // randomly delete indices
    Set<String> indicesToDelete = new HashSet<>();
    int numberOfIndicesToDelete = randomInt(Math.min(2, state.metaData().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToDelete, state.metaData().indices().keys().toArray(String.class))) {
        indicesToDelete.add(state.metaData().index(index).getIndex().getName());
    }
    if (indicesToDelete.isEmpty() == false) {
        DeleteIndexRequest deleteRequest = new DeleteIndexRequest(indicesToDelete.toArray(new String[indicesToDelete.size()]));
        state = cluster.deleteIndices(state, deleteRequest);
        for (String index : indicesToDelete) {
            assertFalse(state.metaData().hasIndex(index));
        }
    }
    // randomly close indices
    int numberOfIndicesToClose = randomInt(Math.min(1, state.metaData().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToClose, state.metaData().indices().keys().toArray(String.class))) {
        CloseIndexRequest closeIndexRequest = new CloseIndexRequest(state.metaData().index(index).getIndex().getName());
        state = cluster.closeIndices(state, closeIndexRequest);
    }
    // randomly open indices
    int numberOfIndicesToOpen = randomInt(Math.min(1, state.metaData().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToOpen, state.metaData().indices().keys().toArray(String.class))) {
        OpenIndexRequest openIndexRequest = new OpenIndexRequest(state.metaData().index(index).getIndex().getName());
        state = cluster.openIndices(state, openIndexRequest);
    }
    // randomly update settings
    Set<String> indicesToUpdate = new HashSet<>();
    boolean containsClosedIndex = false;
    int numberOfIndicesToUpdate = randomInt(Math.min(2, state.metaData().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToUpdate, state.metaData().indices().keys().toArray(String.class))) {
        indicesToUpdate.add(state.metaData().index(index).getIndex().getName());
        if (state.metaData().index(index).getState() == IndexMetaData.State.CLOSE) {
            containsClosedIndex = true;
        }
    }
    if (indicesToUpdate.isEmpty() == false) {
        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indicesToUpdate.toArray(new String[indicesToUpdate.size()]));
        Settings.Builder settings = Settings.builder();
        if (containsClosedIndex == false) {
            settings.put(SETTING_NUMBER_OF_REPLICAS, randomInt(2));
        }
        settings.put("index.refresh_interval", randomIntBetween(1, 5) + "s");
        updateSettingsRequest.settings(settings.build());
        state = cluster.updateSettings(state, updateSettingsRequest);
    }
    // randomly reroute
    if (rarely()) {
        state = cluster.reroute(state, new ClusterRerouteRequest());
    }
    // randomly start and fail allocated shards
    List<ShardRouting> startedShards = new ArrayList<>();
    List<FailedShard> failedShards = new ArrayList<>();
    for (DiscoveryNode node : state.nodes()) {
        IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node);
        MockIndicesService indicesService = (MockIndicesService) indicesClusterStateService.indicesService;
        for (MockIndexService indexService : indicesService) {
            for (MockIndexShard indexShard : indexService) {
                ShardRouting persistedShardRouting = indexShard.routingEntry();
                if (persistedShardRouting.initializing() && randomBoolean()) {
                    startedShards.add(persistedShardRouting);
                } else if (rarely()) {
                    failedShards.add(new FailedShard(persistedShardRouting, "fake shard failure", new Exception()));
                }
            }
        }
    }
    state = cluster.applyFailedShards(state, failedShards);
    state = cluster.applyStartedShards(state, startedShards);
    // randomly add and remove nodes (except current master)
    if (rarely()) {
        if (randomBoolean()) {
            // add node
            if (state.nodes().getSize() < 10) {
                DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(createNode()).build();
                state = ClusterState.builder(state).nodes(newNodes).build();
                // always reroute after adding a node
                state = cluster.reroute(state, new ClusterRerouteRequest());
                updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
            }
        } else {
            // remove node
            if (state.nodes().getDataNodes().size() > 3) {
                DiscoveryNode discoveryNode = randomFrom(state.nodes().getNodes().values().toArray(DiscoveryNode.class));
                if (discoveryNode.equals(state.nodes().getMasterNode()) == false) {
                    DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).remove(discoveryNode.getId()).build();
                    state = ClusterState.builder(state).nodes(newNodes).build();
                    state = cluster.deassociateDeadNodes(state, true, "removed and added a node");
                    updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
                }
                if (randomBoolean()) {
                    // and add it back
                    DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(discoveryNode).build();
                    state = ClusterState.builder(state).nodes(newNodes).build();
                    state = cluster.reroute(state, new ClusterRerouteRequest());
                    updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
                }
            }
        }
    }
    return state;
}
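The start/fail loop above keys off ShardRouting's state predicates (initializing() for start candidates, rarely() for failures). As a minimal sketch of that pattern, assuming the same Elasticsearch APIs, a hypothetical helper (not part of the test) could collect the start candidates straight from a cluster state's routing table:

import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting;

// Hypothetical helper, not part of the test above: collect every shard copy that
// is still initializing, i.e. the same candidates the test randomly promotes to
// started via applyStartedShards.
final class InitializingShards {
    static List<ShardRouting> of(ClusterState state) {
        List<ShardRouting> initializing = new ArrayList<>();
        for (ShardRouting shard : state.getRoutingTable().allShards()) {
            if (shard.initializing()) {
                initializing.add(shard);
            }
        }
        return initializing;
    }
}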
Use of org.elasticsearch.cluster.routing.ShardRouting in project elasticsearch by elastic.
The class FlushIT, method testSyncedFlush.
public void testSyncedFlush() throws ExecutionException, InterruptedException, IOException {
    internalCluster().ensureAtLeastNumDataNodes(2);
    prepareCreate("test").setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).get();
    ensureGreen();
    final Index index = client().admin().cluster().prepareState().get().getState().metaData().index("test").getIndex();
    IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
    for (ShardStats shardStats : indexStats.getShards()) {
        assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
    }
    ShardsSyncedFlushResult result;
    if (randomBoolean()) {
        logger.info("--> sync flushing shard 0");
        result = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), new ShardId(index, 0));
    } else {
        logger.info("--> sync flushing index [test]");
        SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get();
        result = indicesResult.getShardsResultPerIndex().get("test").get(0);
    }
    assertFalse(result.failed());
    assertThat(result.totalShards(), equalTo(indexStats.getShards().length));
    assertThat(result.successfulShards(), equalTo(indexStats.getShards().length));
    indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
    String syncId = result.syncId();
    for (ShardStats shardStats : indexStats.getShards()) {
        final String shardSyncId = shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID);
        assertThat(shardSyncId, equalTo(syncId));
    }
    // now start a new node, relocate a shard there, and check whether the sync id is still present
    String newNodeName = internalCluster().startNode();
    ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
    ShardRouting shardRouting = clusterState.getRoutingTable().index("test").shard(0).iterator().next();
    String currentNodeName = clusterState.nodes().resolveNode(shardRouting.currentNodeId()).getName();
    assertFalse(currentNodeName.equals(newNodeName));
    internalCluster().client().admin().cluster().prepareReroute()
        .add(new MoveAllocationCommand("test", 0, currentNodeName, newNodeName)).get();
    client().admin().cluster().prepareHealth().setWaitForNoRelocatingShards(true).get();
    indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
    for (ShardStats shardStats : indexStats.getShards()) {
        assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
    }
    client().admin().indices().prepareUpdateSettings("test")
        .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build()).get();
    ensureGreen("test");
    indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
    for (ShardStats shardStats : indexStats.getShards()) {
        assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
    }
    client().admin().indices().prepareUpdateSettings("test")
        .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, internalCluster().numDataNodes() - 1).build()).get();
    ensureGreen("test");
    indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
    for (ShardStats shardStats : indexStats.getShards()) {
        assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
    }
}
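The test reads the sync id back from each shard copy's Lucene commit user data. A small hedged helper capturing that check (the class and method names are invented for illustration; Engine.SYNC_COMMIT_ID and the stats accessors are the ones used above) might look like:

import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.index.engine.Engine;

// Hypothetical helper: true if every shard copy carries the expected sync id in
// its commit user data, the invariant the test asserts after a synced flush.
final class SyncIdCheck {
    static boolean allShardsShareSyncId(ShardStats[] shards, String expectedSyncId) {
        for (ShardStats shard : shards) {
            String syncId = shard.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID);
            if (expectedSyncId.equals(syncId) == false) {
                return false;
            }
        }
        return true;
    }
}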
Use of org.elasticsearch.cluster.routing.ShardRouting in project elasticsearch by elastic.
The class FlushIT, method assertFlushResponseEqualsShardStats.
private void assertFlushResponseEqualsShardStats(ShardStats[] shardsStats, List<ShardsSyncedFlushResult> syncedFlushResults) {
    for (final ShardStats shardStats : shardsStats) {
        for (final ShardsSyncedFlushResult shardResult : syncedFlushResults) {
            if (shardStats.getShardRouting().getId() == shardResult.shardId().getId()) {
                for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> singleResponse : shardResult.shardResponses().entrySet()) {
                    if (singleResponse.getKey().currentNodeId().equals(shardStats.getShardRouting().currentNodeId())) {
                        if (singleResponse.getValue().success()) {
                            logger.info("{} sync flushed on node {}", singleResponse.getKey().shardId(), singleResponse.getKey().currentNodeId());
                            assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
                        } else {
                            logger.info("{} sync flush failed on node {}", singleResponse.getKey().shardId(), singleResponse.getKey().currentNodeId());
                            assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
                        }
                    }
                }
            }
        }
    }
}
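The nested loops above pair a synced-flush response with its stats entry by shard id plus current node id, since a shard id alone is ambiguous once replicas exist. A hypothetical lookup helper (names invented; same accessors as above) makes that pairing explicit:

import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.cluster.routing.ShardRouting;

// Hypothetical helper: find the stats entry for one concrete shard copy by
// matching both the shard id and the node the copy is currently allocated on.
final class ShardStatsLookup {
    static ShardStats statsFor(ShardRouting routing, ShardStats[] allStats) {
        for (ShardStats stats : allStats) {
            ShardRouting candidate = stats.getShardRouting();
            if (candidate.shardId().equals(routing.shardId())
                && candidate.currentNodeId().equals(routing.currentNodeId())) {
                return stats;
            }
        }
        return null; // no stats entry for this copy
    }
}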
Use of org.elasticsearch.cluster.routing.ShardRouting in project elasticsearch by elastic.
The class SyncedFlushSingleNodeTests, method testModificationPreventsFlushing.
public void testModificationPreventsFlushing() throws InterruptedException {
    createIndex("test");
    client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
    IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
    IndexShard shard = test.getShardOrNull(0);
    SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
    final ShardId shardId = shard.shardId();
    final ClusterState state = getInstanceFromNode(ClusterService.class).state();
    final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
    final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
    assertEquals("exactly one active shard", 1, activeShards.size());
    Map<String, Engine.CommitId> commitIds = SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
    assertEquals("exactly one commit id", 1, commitIds.size());
    client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON).get();
    String syncId = UUIDs.base64UUID();
    SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
    flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
    listener.latch.await();
    assertNull(listener.error);
    ShardsSyncedFlushResult syncedFlushResult = listener.result;
    assertNotNull(syncedFlushResult);
    assertEquals(0, syncedFlushResult.successfulShards());
    assertEquals(1, syncedFlushResult.totalShards());
    assertEquals(syncId, syncedFlushResult.syncId());
    assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
    assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
    assertEquals("pending operations", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
    // pull another commit and make sure we can't sync-flush with the old one
    SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
    listener = new SyncedFlushUtil.LatchedListener<>();
    flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
    listener.latch.await();
    assertNull(listener.error);
    syncedFlushResult = listener.result;
    assertNotNull(syncedFlushResult);
    assertEquals(0, syncedFlushResult.successfulShards());
    assertEquals(1, syncedFlushResult.totalShards());
    assertEquals(syncId, syncedFlushResult.syncId());
    assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
    assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
    assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
}
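The test exercises the two phases separately: sendPreSyncRequests snapshots a commit id per active shard copy, and sendSyncRequests only seals copies whose commit is still the one snapshotted; any write in between produces the "pending operations" or "commit has changed" failures asserted above. A hedged wrapper over the same calls (the class is invented; SyncedFlushUtil is the test utility used above, not production API) could read:

import java.util.List;
import java.util.Map;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
import org.elasticsearch.indices.flush.SyncedFlushService;

// Hypothetical wrapper: run both synced-flush phases back to back with a fresh
// sync id. Writes landing between the two calls invalidate the commit ids and
// make the second phase fail, which is exactly what the test provokes on purpose.
final class TwoPhaseSyncedFlush {
    static void attempt(SyncedFlushService flushService, List<ShardRouting> activeShards,
                        ClusterState state, ShardId shardId, int totalShards,
                        SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener) {
        Map<String, Engine.CommitId> commitIds =
            SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
        flushService.sendSyncRequests(UUIDs.base64UUID(), activeShards, state, commitIds,
            shardId, totalShards, listener);
    }
}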
Use of org.elasticsearch.cluster.routing.ShardRouting in project elasticsearch by elastic.
The class RareClusterStateIT, method testDelayedMappingPropagationOnPrimary.
public void testDelayedMappingPropagationOnPrimary() throws Exception {
    // Here we want to test that things go well if there is a first request
    // that adds mappings but, before those mappings are propagated to all nodes,
    // another index request introduces the same mapping. The master node
    // will reply immediately since it did not change the cluster state,
    // but the change might not yet be on the node that performed the indexing
    // operation.
    Settings settings = Settings.builder()
        // explicitly set so it won't default to the publish timeout
        .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s")
        // don't wait post commit as we are blocking things by design
        .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s")
        .build();
    final List<String> nodeNames = internalCluster().startNodes(2, settings);
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
    final String master = internalCluster().getMasterName();
    assertThat(nodeNames, hasItem(master));
    String otherNode = null;
    for (String node : nodeNames) {
        if (node.equals(master) == false) {
            otherNode = node;
            break;
        }
    }
    assertNotNull(otherNode);
    // Don't allocate the shard on the master node
    assertAcked(prepareCreate("index").setSettings(Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
        .put("index.routing.allocation.exclude._name", master)).get());
    ensureGreen();
    // Check routing tables
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    assertEquals(master, state.nodes().getMasterNode().getName());
    List<ShardRouting> shards = state.routingTable().allShards("index");
    assertThat(shards, hasSize(1));
    for (ShardRouting shard : shards) {
        if (shard.primary()) {
            // primary must not be on the master node
            assertFalse(state.nodes().getMasterNodeId().equals(shard.currentNodeId()));
        } else {
            // only primaries are expected
            fail();
        }
    }
    // Block cluster state processing on the node that holds our shard
    BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(otherNode, random());
    internalCluster().setDisruptionScheme(disruption);
    disruption.startDisrupting();
    // Add a new mapping...
    final AtomicReference<Object> putMappingResponse = new AtomicReference<>();
    client().admin().indices().preparePutMapping("index").setType("type").setSource("field", "type=long")
        .execute(new ActionListener<PutMappingResponse>() {
            @Override
            public void onResponse(PutMappingResponse response) {
                putMappingResponse.set(response);
            }

            @Override
            public void onFailure(Exception e) {
                putMappingResponse.set(e);
            }
        });
    // ...and wait for mappings to be available on master
    assertBusy(new Runnable() {
        @Override
        public void run() {
            ImmutableOpenMap<String, MappingMetaData> indexMappings =
                client().admin().indices().prepareGetMappings("index").get().getMappings().get("index");
            assertNotNull(indexMappings);
            MappingMetaData typeMappings = indexMappings.get("type");
            assertNotNull(typeMappings);
            Object properties;
            try {
                properties = typeMappings.getSourceAsMap().get("properties");
            } catch (IOException e) {
                throw new AssertionError(e);
            }
            assertNotNull(properties);
            Object fieldMapping = ((Map<String, Object>) properties).get("field");
            assertNotNull(fieldMapping);
        }
    });
    final AtomicReference<Object> docIndexResponse = new AtomicReference<>();
    client().prepareIndex("index", "type", "1").setSource("field", 42).execute(new ActionListener<IndexResponse>() {
        @Override
        public void onResponse(IndexResponse response) {
            docIndexResponse.set(response);
        }

        @Override
        public void onFailure(Exception e) {
            docIndexResponse.set(e);
        }
    });
    // Wait a bit to make sure that the reason why we did not get a response
    // is that cluster state processing is blocked, and not just that it takes
    // time to process the indexing request
    Thread.sleep(100);
    assertThat(putMappingResponse.get(), equalTo(null));
    assertThat(docIndexResponse.get(), equalTo(null));
    // Now make sure the indexing request finishes successfully
    disruption.stopDisrupting();
    assertBusy(new Runnable() {
        @Override
        public void run() {
            assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class));
            PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get();
            assertTrue(resp.isAcknowledged());
            assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class));
            IndexResponse docResp = (IndexResponse) docIndexResponse.get();
            assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), 1, docResp.getShardInfo().getTotal());
        }
    });
}
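The routing-table loop above is the standard way to verify shard placement. As a minimal sketch under the same APIs (the helper name is invented), the "primary must not be on the master" condition generalizes to:

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting;

// Hypothetical helper: true if no primary shard of the given index is allocated
// on the current master node, the placement the test enforces via
// index.routing.allocation.exclude._name.
final class PrimaryPlacementCheck {
    static boolean primariesAvoidMaster(ClusterState state, String index) {
        String masterNodeId = state.nodes().getMasterNodeId();
        for (ShardRouting shard : state.routingTable().allShards(index)) {
            if (shard.primary() && masterNodeId.equals(shard.currentNodeId())) {
                return false;
            }
        }
        return true;
    }
}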