Use of com.yahoo.vdslib.state.NodeState in project vespa by vespa-engine.
From class ClusterStateGeneratorTest, method fully_observed_distributor_timestamp_not_included_in_state.
@Test
public void fully_observed_distributor_timestamp_not_included_in_state() {
    final NodeState nodeState = new NodeState(NodeType.DISTRIBUTOR, State.UP);
    nodeState.setStartTimestamp(6000);
    final ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
            .bringEntireClusterUp()
            .reportDistributorNodeState(0, nodeState);
    // Acknowledge the reported timestamp on the NodeInfo, marking it as fully observed.
    final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.DISTRIBUTOR, 0));
    nodeInfo.setStartTimestamp(6000);
    final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
    // A fully observed start timestamp must not leak into the generated cluster state.
    assertThat(state.toString(), equalTo("distributor:5 storage:5"));
}
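For contrast, here is a minimal companion sketch, not part of the original test class, covering the case where the controller has not yet observed the reported start timestamp (the NodeInfo acknowledgement is omitted). The test name and the expected ".0.t:6000" token are assumptions based on vdslib's NodeState serialization of start timestamps.

@Test
public void unobserved_distributor_timestamp_included_in_state() {
    final NodeState nodeState = new NodeState(NodeType.DISTRIBUTOR, State.UP);
    nodeState.setStartTimestamp(6000);
    final ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
            .bringEntireClusterUp()
            .reportDistributorNodeState(0, nodeState);
    // Deliberately no nodeInfo.setStartTimestamp(6000) acknowledgement here.
    final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
    // Assumed serialization: unacknowledged start timestamps surface as ".<index>.t:<ts>".
    assertThat(state.toString(), equalTo("distributor:5 .0.t:6000 storage:5"));
}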
Use of com.yahoo.vdslib.state.NodeState in project vespa by vespa-engine.
From class ClusterStateGeneratorTest, method fully_observed_storage_node_timestamp_not_included_in_state.
@Test
public void fully_observed_storage_node_timestamp_not_included_in_state() {
    final NodeState nodeState = new NodeState(NodeType.STORAGE, State.UP);
    nodeState.setStartTimestamp(5000);
    final ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
            .bringEntireClusterUp()
            .reportStorageNodeState(0, nodeState);
    // Acknowledge the reported timestamp on the NodeInfo, marking it as fully observed.
    final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 0));
    nodeInfo.setStartTimestamp(5000);
    final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
    // As in the distributor case, the observed timestamp is omitted from the state.
    assertThat(state.toString(), equalTo("distributor:5 storage:5"));
}
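The storage-node variant mirrors the distributor case above: by the same logic, omitting the nodeInfo.setStartTimestamp(5000) acknowledgement should make the timestamp surface in the storage section of the state string (presumably as ".0.t:5000").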
Use of com.yahoo.vdslib.state.NodeState in project vespa by vespa-engine.
From class ClusterStateGeneratorTest, method implicit_down_while_listing_buckets_does_not_override_wanted_state.
/**
 * The implicit Down mapping applied while a node is reported as initializing should
 * not take effect if the node's wanted state is set to Maintenance.
 */
@Test
public void implicit_down_while_listing_buckets_does_not_override_wanted_state() {
    final NodeState initWhileListingBuckets = new NodeState(NodeType.STORAGE, State.INITIALIZING);
    initWhileListingBuckets.setInitProgress(0.0f); // zero progress implies the bucket-listing phase
    final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
            .bringEntireClusterUp()
            .reportStorageNodeState(1, initWhileListingBuckets)
            .proposeStorageNodeWantedState(1, State.MAINTENANCE);
    final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
    // The wanted Maintenance state (".1.s:m") takes precedence over the implicit Down mapping.
    assertThat(state.toString(), equalTo("distributor:3 storage:3 .1.s:m"));
}
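The precedence rule this test verifies can be sketched as a small helper. The resolveState name and its structure are hypothetical; the actual generator may organize this logic differently.

// Hypothetical helper, for illustration only; not the generator's actual code.
static State resolveState(State implicitFromReported, State wantedState) {
    if (wantedState == State.MAINTENANCE) {
        return State.MAINTENANCE; // explicit operator intent wins over the implicit Down mapping
    }
    return implicitFromReported;
}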
Use of com.yahoo.vdslib.state.NodeState in project vespa by vespa-engine.
From class ClusterStateGeneratorTest, method storage_node_in_init_mode_while_listing_buckets_is_marked_down.
/**
 * A storage node will report itself as being in initializing mode immediately when
 * starting up. It can only accept external operations once it has finished listing
 * the set of buckets (but not necessarily their contents). As a consequence of this,
 * we have to map a reported init state during the bucket-listing phase to Down. This
 * prevents clients from thinking they can use the node and prevents distributors from
 * trying to fetch not-yet-existent bucket sets from it.
 *
 * Detecting the bucket-listing stage is currently done by inspecting the node's init
 * progress value and triggering on a sufficiently low value.
 */
@Test
public void storage_node_in_init_mode_while_listing_buckets_is_marked_down() {
    final NodeState initWhileListingBuckets = new NodeState(NodeType.STORAGE, State.INITIALIZING);
    initWhileListingBuckets.setInitProgress(0.0f); // zero progress implies the bucket-listing phase
    final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
            .bringEntireClusterUp()
            .reportStorageNodeState(1, initWhileListingBuckets);
    final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
    // Node 1 is mapped to Down (".1.s:d") despite reporting itself as initializing.
    assertThat(state.toString(), equalTo("distributor:3 storage:3 .1.s:d"));
}
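The "sufficiently low value" trigger described in the comment above amounts to a threshold check on the reported init progress (assuming NodeState exposes a getInitProgress() matching the setInitProgress() used here). Both the constant name and its value in this sketch are assumptions; the real generator's threshold may differ.

// Hypothetical threshold; the actual constant lives in the cluster state generator.
private static final double BUCKET_LISTING_PROGRESS_THRESHOLD = 0.01;

static State effectiveStorageState(NodeState reported) {
    if (reported.getState() == State.INITIALIZING
            && reported.getInitProgress() < BUCKET_LISTING_PROGRESS_THRESHOLD) {
        return State.DOWN; // still listing buckets; unusable by clients and distributors
    }
    return reported.getState();
}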
Use of com.yahoo.vdslib.state.NodeState in project vespa by vespa-engine.
From class ClusterStateGeneratorTest, method distribution_bit_not_influenced_by_nodes_down_or_in_maintenance.
// TODO do we really want this behavior? It's the legacy one, but it seems... dangerous. Especially for maintenance.
// TODO We generally want to avoid distribution bit decreases if at all possible, since "collapsing"
// the top-level bucket space can cause data loss on timestamp collisions across super buckets.
@Test
public void distribution_bit_not_influenced_by_nodes_down_or_in_maintenance() {
    final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
            .bringEntireClusterUp()
            .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(7))
            .reportStorageNodeState(1, new NodeState(NodeType.STORAGE, State.DOWN).setMinUsedBits(6))
            .reportStorageNodeState(2, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(5))
            .proposeStorageNodeWantedState(2, State.MAINTENANCE);
    final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
    // Only node 0 remains available, so bits becomes 7; the lower values from the Down and Maintenance nodes are ignored.
    assertThat(state.toString(), equalTo("bits:7 distributor:3 storage:3 .1.s:d .2.s:m"));
}
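The behavior the TODOs question can be summarized as: only nodes that remain available in the generated state contribute their reported min used bits to the cluster-wide bit count. A rough illustrative sketch follows, using java.util.Map and java.util.function.Function; the helper, its name, and the fallback of 16 bits are assumptions, not the generator's actual code (and NodeState is assumed to expose getMinUsedBits()).

// Illustrative only: derive the cluster's distribution bit count from available nodes.
static int inferDistributionBits(Map<Node, NodeState> reportedStorageStates,
                                 Function<Node, State> generatedStateOf) {
    return reportedStorageStates.entrySet().stream()
            .filter(e -> generatedStateOf.apply(e.getKey()) == State.UP) // skip Down/Maintenance nodes
            .mapToInt(e -> e.getValue().getMinUsedBits())
            .min()
            .orElse(16); // assumed default bit count when no storage node is available
}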