Use of org.elasticsearch.cluster.metadata.IndexMetaData in project elasticsearch by elastic.
The class FilterAllocationDeciderTests, method createInitialClusterState.
private ClusterState createInitialClusterState(AllocationService service, Settings settings) {
    RecoverySource.Type recoveryType = randomFrom(RecoverySource.Type.EMPTY_STORE,
        RecoverySource.Type.LOCAL_SHARDS, RecoverySource.Type.SNAPSHOT);
    MetaData.Builder metaData = MetaData.builder();
    final Settings.Builder indexSettings = settings(Version.CURRENT).put(settings);
    final IndexMetaData sourceIndex;
    if (recoveryType == RecoverySource.Type.LOCAL_SHARDS) {
        // put a fake closed source index
        sourceIndex = IndexMetaData.builder("sourceIndex")
            .settings(settings(Version.CURRENT))
            .numberOfShards(2)
            .numberOfReplicas(0)
            .putInSyncAllocationIds(0, Collections.singleton("aid0"))
            .putInSyncAllocationIds(1, Collections.singleton("aid1"))
            .build();
        metaData.put(sourceIndex, false);
        indexSettings.put(INDEX_SHRINK_SOURCE_UUID.getKey(), sourceIndex.getIndexUUID());
        indexSettings.put(INDEX_SHRINK_SOURCE_NAME.getKey(), sourceIndex.getIndex().getName());
    } else {
        sourceIndex = null;
    }
    final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder("idx")
        .settings(indexSettings)
        .numberOfShards(1)
        .numberOfReplicas(1);
    if (recoveryType == RecoverySource.Type.SNAPSHOT) {
        indexMetaDataBuilder.putInSyncAllocationIds(0, Collections.singleton("_snapshot_restore"));
    }
    final IndexMetaData indexMetaData = indexMetaDataBuilder.build();
    metaData.put(indexMetaData, false);
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
    switch (recoveryType) {
        case EMPTY_STORE:
            routingTableBuilder.addAsNew(indexMetaData);
            break;
        case SNAPSHOT:
            routingTableBuilder.addAsRestore(indexMetaData,
                new RecoverySource.SnapshotRecoverySource(
                    new Snapshot("repository", new SnapshotId("snapshot_name", "snapshot_uuid")),
                    Version.CURRENT, indexMetaData.getIndex().getName()));
            break;
        case LOCAL_SHARDS:
            routingTableBuilder.addAsFromCloseToOpen(sourceIndex);
            routingTableBuilder.addAsNew(indexMetaData);
            break;
        default:
            throw new UnsupportedOperationException(recoveryType + " is not supported");
    }
    RoutingTable routingTable = routingTableBuilder.build();
    ClusterState clusterState = ClusterState
        .builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData)
        .routingTable(routingTable)
        .build();
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
        .build();
    return service.reroute(clusterState, "reroute", false);
}
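The helper only builds the starting cluster state for the "idx" index with a randomly chosen recovery source; the allocation filter rules themselves arrive through the settings parameter, which is merged into the index settings. As a hedged illustration (these exact keys are not part of the snippet above), the filter settings that FilterAllocationDecider evaluates take roughly this form:

    // Sketch only: index-level allocation filters as FilterAllocationDecider reads them.
    // "_name" is one of several supported node attributes (_id, _ip, _host, or custom attributes).
    Settings filterRules = Settings.builder()
        .put("index.routing.allocation.include._name", "node1")   // allocate only to matching nodes
        .put("index.routing.allocation.exclude._name", "node2")   // never allocate to matching nodes
        .build();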
Use of org.elasticsearch.cluster.metadata.IndexMetaData in project elasticsearch by elastic.
The class EnableAllocationTests, method testEnableClusterBalanceNoReplicas.
public void testEnableClusterBalanceNoReplicas() {
    final boolean useClusterSetting = randomBoolean();
    Settings build = Settings.builder()
        // index settings override cluster settings
        .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
            useClusterSetting ? Rebalance.NONE : RandomPicks.randomFrom(random(), Rebalance.values()))
        .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 3)
        .build();
    ClusterSettings clusterSettings = new ClusterSettings(build, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    AllocationService strategy = createAllocationService(build, clusterSettings, random());
    Settings indexSettings = useClusterSetting
        ? Settings.EMPTY
        : Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE).build();
    logger.info("Building initial routing table");
    MetaData metaData = MetaData.builder()
        .put(IndexMetaData.builder("test")
            .settings(settings(Version.CURRENT).put(indexSettings))
            .numberOfShards(6)
            .numberOfReplicas(0))
        .build();
    RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
    ClusterState clusterState = ClusterState
        .builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData)
        .routingTable(initialRoutingTable)
        .build();
logger.info("--> adding one nodes and do rerouting");
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
        .build();
    clusterState = strategy.reroute(clusterState, "reroute");
    assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(6));
    logger.info("--> start the shards (primaries)");
    clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(6));
    assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
logger.info("--> adding one nodes and do rerouting");
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")))
        .build();
    clusterState = strategy.reroute(clusterState, "reroute");
    assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(6));
    assertThat(clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(0));
    metaData = clusterState.metaData();
    if (useClusterSetting) {
        clusterState = ClusterState.builder(clusterState)
            .metaData(MetaData.builder(metaData)
                .transientSettings(Settings.builder()
                    .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL)
                    .build()))
            .build();
    } else {
        IndexMetaData meta = clusterState.getMetaData().index("test");
        clusterState = ClusterState.builder(clusterState)
            .metaData(MetaData.builder(metaData)
                .removeAllIndices()
                .put(IndexMetaData.builder(meta)
                    .settings(Settings.builder()
                        .put(meta.getSettings())
                        .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
                            randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL)
                        .build())))
            .build();
    }
    clusterSettings.applySettings(clusterState.metaData().settings());
    clusterState = strategy.reroute(clusterState, "reroute");
    assertThat("expected 4 primaries to be started and 2 to relocate useClusterSettings: " + useClusterSetting,
        clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4));
    assertThat("expected 2 primaries to relocate useClusterSettings: " + useClusterSetting,
        clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(2));
}
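The interesting step is the settings flip halfway through: rebalancing starts out disabled and is then re-enabled either cluster-wide or on the index, depending on useClusterSetting. Reduced to just the two settings involved (a sketch reusing the constants from the snippet, not additional test code):

    // Cluster-level (transient) setting: applies to all indices unless overridden.
    Settings clusterLevel = Settings.builder()
        .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.ALL)
        .build();
    // Index-level setting: as the comment in the snippet notes, it overrides the cluster-wide value.
    Settings indexLevel = Settings.builder()
        .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE)
        .build();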
Use of org.elasticsearch.cluster.metadata.IndexMetaData in project elasticsearch by elastic.
The class DiskThresholdDeciderUnitTests, method testCanRemainUsesLeastAvailableSpace.
public void testCanRemainUsesLeastAvailableSpace() {
    ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss);
    ImmutableOpenMap.Builder<ShardRouting, String> shardRoutingMap = ImmutableOpenMap.builder();
    DiscoveryNode node_0 = new DiscoveryNode("node_0", buildNewFakeTransportAddress(), Collections.emptyMap(),
        new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
    DiscoveryNode node_1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Collections.emptyMap(),
        new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
    MetaData metaData = MetaData.builder()
        .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
        .build();
    final IndexMetaData indexMetaData = metaData.index("test");
    ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 0), true,
        StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
    test_0 = ShardRoutingHelper.initialize(test_0, node_0.getId());
    test_0 = ShardRoutingHelper.moveToStarted(test_0);
    shardRoutingMap.put(test_0, "/node0/least");
    ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 1), true,
        StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
    test_1 = ShardRoutingHelper.initialize(test_1, node_1.getId());
    test_1 = ShardRoutingHelper.moveToStarted(test_1);
    shardRoutingMap.put(test_1, "/node1/least");
    ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 2), true,
        StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
    test_2 = ShardRoutingHelper.initialize(test_2, node_1.getId());
    test_2 = ShardRoutingHelper.moveToStarted(test_2);
    shardRoutingMap.put(test_2, "/node1/most");
    ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 3), true,
        StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
    test_3 = ShardRoutingHelper.initialize(test_3, node_1.getId());
    test_3 = ShardRoutingHelper.moveToStarted(test_3);
    // Intentionally not in the shardRoutingMap. We want to test what happens when we don't know where it is.
    RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetaData).build();
    ClusterState clusterState = ClusterState
        .builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData)
        .routingTable(routingTable)
        .build();
    logger.info("--> adding two nodes");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(node_0).add(node_1)).build();
    // actual test -- after all that bloat :)
    ImmutableOpenMap.Builder<String, DiskUsage> leastAvailableUsages = ImmutableOpenMap.builder();
    // 90% used
    leastAvailableUsages.put("node_0", new DiskUsage("node_0", "node_0", "/node0/least", 100, 10));
    // 91% used
    leastAvailableUsages.put("node_1", new DiskUsage("node_1", "node_1", "/node1/least", 100, 9));
    ImmutableOpenMap.Builder<String, DiskUsage> mostAvailableUsage = ImmutableOpenMap.builder();
    // 10% used
    mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "/node0/most", 100, 90));
    // 10% used
    mostAvailableUsage.put("node_1", new DiskUsage("node_1", "node_1", "/node1/most", 100, 90));
    ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
    // 10 bytes
    shardSizes.put("[test][0][p]", 10L);
    shardSizes.put("[test][1][p]", 10L);
    shardSizes.put("[test][2][p]", 10L);
    final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(),
        shardSizes.build(), shardRoutingMap.build());
    RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)),
        clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime(), false);
    allocation.debugDecision(true);
    Decision decision = decider.canRemain(test_0, new RoutingNode("node_0", node_0), allocation);
    assertEquals(Decision.Type.YES, decision.type());
    assertThat(((Decision.Single) decision).getExplanation(),
        containsString("there is enough disk on this node for the shard to remain, free: [10b]"));
    decision = decider.canRemain(test_1, new RoutingNode("node_1", node_1), allocation);
    assertEquals(Decision.Type.NO, decision.type());
    assertThat(((Decision.Single) decision).getExplanation(),
        containsString("the shard cannot remain on this node because it is "
            + "above the high watermark cluster setting [cluster.routing.allocation.disk.watermark.high=90%] and there is less than "
            + "the required [10.0%] free disk on node, actual free: [9.0%]"));
    try {
        decider.canRemain(test_0, new RoutingNode("node_1", node_1), allocation);
        fail("not allocated on this node");
    } catch (IllegalArgumentException ex) {
        // not allocated on that node
    }
    try {
        decider.canRemain(test_1, new RoutingNode("node_0", node_0), allocation);
        fail("not allocated on this node");
    } catch (IllegalArgumentException ex) {
        // not allocated on that node
    }
    decision = decider.canRemain(test_2, new RoutingNode("node_1", node_1), allocation);
    assertEquals("can stay since allocated on a different path with enough space", Decision.Type.YES, decision.type());
    assertThat(((Decision.Single) decision).getExplanation(),
        containsString("this shard is not allocated on the most utilized disk and can remain"));
    decision = decider.canRemain(test_3, new RoutingNode("node_1", node_1), allocation);
    assertEquals("can stay since we don't have information about this shard", Decision.Type.YES, decision.type());
    assertThat(((Decision.Single) decision).getExplanation(),
        containsString("this shard is not allocated on the most utilized disk and can remain"));
}
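The decisions asserted above follow directly from the DiskUsage values fed into ClusterInfo, measured against the default high watermark of 90% used (10% free) quoted in the assertion message. A quick sketch of the arithmetic:

    // node_0, /node0/least: 100 bytes total, 10 free -> 10.0% free, meets the required 10% -> canRemain == YES
    // node_1, /node1/least: 100 bytes total,  9 free ->  9.0% free, below the required 10% -> canRemain == NO
    double freeNode0 = 100.0 * 10 / 100;  // 10.0
    double freeNode1 = 100.0 * 9 / 100;   //  9.0

test_2 lives on the /node1/most path, which is only 10% used, and test_3 has no known path at all, so both get a YES with the "not allocated on the most utilized disk" explanation.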
Use of org.elasticsearch.cluster.metadata.IndexMetaData in project elasticsearch by elastic.
The class ShardStateIT, method assertPrimaryTerms.
protected void assertPrimaryTerms(long term0, long term1) {
    for (String node : internalCluster().getNodeNames()) {
        logger.debug("--> asserting primary terms on [{}]", node);
        ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState();
        IndexMetaData metaData = state.metaData().index("test");
        assertThat(metaData.primaryTerm(0), equalTo(term0));
        assertThat(metaData.primaryTerm(1), equalTo(term1));
        IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
        IndexService indexService = indicesService.indexService(metaData.getIndex());
        if (indexService != null) {
            for (IndexShard shard : indexService) {
                assertThat("term mismatch for shard " + shard.shardId(),
                    shard.getPrimaryTerm(), equalTo(metaData.primaryTerm(shard.shardId().id())));
            }
        }
    }
}
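A hypothetical call site, with invented term values: if a test failed the primary of shard 0 so that a replica was promoted (bumping that shard's primary term once) while shard 1 kept its original primary, the check would be:

    // Hypothetical values for illustration: shard 0's term was bumped by one promotion, shard 1's was not.
    assertPrimaryTerms(2L, 1L);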
Use of org.elasticsearch.cluster.metadata.IndexMetaData in project elasticsearch by elastic.
The class AnalysisPolishFactoryTests, method testThreadSafety.
public void testThreadSafety() throws IOException {
    // TODO: is this the right boilerplate? I forked this out of TransportAnalyzeAction.java:
    Settings settings = Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
        .build();
    Environment environment = new Environment(settings);
    IndexMetaData metaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(settings).build();
    IndexSettings indexSettings = new IndexSettings(metaData, Settings.EMPTY);
    testThreadSafety(new PolishStemTokenFilterFactory(indexSettings, environment, "stempelpolishstem", settings));
}
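The settings block is the minimum IndexMetaData will accept (created version, shard and replica counts, index UUID) plus a home path for Environment. As a hedged sketch of what the factory built here does with a token stream (not part of the test, which delegates to a shared thread-safety harness; the WhitespaceTokenizer source is an assumption):

    // Sketch: run one Polish token through the stempel stemmer produced by the factory.
    Tokenizer source = new WhitespaceTokenizer();
    source.setReader(new StringReader("historiami"));
    TokenFilterFactory factory = new PolishStemTokenFilterFactory(indexSettings, environment, "stempelpolishstem", settings);
    try (TokenStream stemmed = factory.create(source)) {
        stemmed.reset();
        while (stemmed.incrementToken()) {
            // consume the stemmed terms
        }
        stemmed.end();
    }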