Use of org.apache.helix.model.Partition in project helix by apache.
From the class TestCurrentStateComputationStage, method testEmptyCS.
@Test
public void testEmptyCS() {
Map<String, Resource> resourceMap = getResourceMap();
event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
CurrentStateComputationStage stage = new CurrentStateComputationStage();
runStage(event, new ReadClusterDataStage());
runStage(event, stage);
CurrentStateOutput output = event.getAttribute(AttributeName.CURRENT_STATE.name());
AssertJUnit.assertEquals(output.getCurrentStateMap("testResourceName", new Partition("testResourceName_0")).size(), 0);
}
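The stage exercised above reduces to reading and writing a CurrentStateOutput keyed by resource name and Partition. The following is a minimal sketch of that contract, using only calls that already appear in these tests (the Partition constructor, setCurrentState, and getCurrentStateMap); the resource and instance names are hypothetical, and the imports assume the controller-stages package layout these tests use.
import java.util.Map;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.Partition;

public class CurrentStateOutputSketch {
  public static void main(String[] args) {
    CurrentStateOutput output = new CurrentStateOutput();
    // Record that instance localhost_0 currently holds partition testResourceName_0 in SLAVE.
    output.setCurrentState("testResourceName", new Partition("testResourceName_0"), "localhost_0", "SLAVE");
    // Read the instance-to-state map back; a fresh Partition with the same name is expected
    // to work as the key, since these tests also look up with newly constructed Partition objects.
    Map<String, String> stateMap =
        output.getCurrentStateMap("testResourceName", new Partition("testResourceName_0"));
    System.out.println(stateMap); // expected: {localhost_0=SLAVE}
  }
}
With no setCurrentState call at all, the same lookup yields an empty map, which is exactly what testEmptyCS asserts.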
Use of org.apache.helix.model.Partition in project helix by apache.
From the class TestIntermediateStateCalcStage, method testNoStateMissing.
@Test
public void testNoStateMissing() {
String resourcePrefix = "resource";
int nResource = 4;
int nPartition = 2;
int nReplica = 3;
Set<String> resourceSet = new HashSet<>();
for (int i = 0; i < nResource; i++) {
resourceSet.add(resourcePrefix + "_" + i);
}
preSetup(StateTransitionThrottleConfig.RebalanceType.RECOVERY_BALANCE, resourceSet, nReplica, nReplica);
event.addAttribute(AttributeName.RESOURCES.name(), getResourceMap(resourceSet.toArray(new String[resourceSet.size()]), nPartition, "OnlineOffline"));
event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), getResourceMap(resourceSet.toArray(new String[resourceSet.size()]), nPartition, "OnlineOffline"));
// Initialize bestpossible state and current state
BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
IntermediateStateOutput expectedResult = new IntermediateStateOutput();
for (String resource : resourceSet) {
IdealState is = accessor.getProperty(accessor.keyBuilder().idealStates(resource));
setSingleIdealState(is);
Map<String, List<String>> partitionMap = new HashMap<String, List<String>>();
for (int p = 0; p < nPartition; p++) {
Partition partition = new Partition(resource + "_" + p);
for (int r = 0; r < nReplica; r++) {
String instanceName = HOSTNAME_PREFIX + r;
partitionMap.put(partition.getPartitionName(), Collections.singletonList(instanceName));
if (resource.endsWith("0")) {
// Regular recovery balance
currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
// should be recovered:
expectedResult.setState(resource, partition, instanceName, "ONLINE");
} else if (resource.endsWith("1")) {
// Regular load balance
currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
currentStateOutput.setCurrentState(resource, partition, instanceName + "-1", "OFFLINE");
bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
// should end up in the best possible state:
expectedResult.setState(resource, partition, instanceName, "ONLINE");
} else if (resource.endsWith("2")) {
// Recovery balance with transient states, should keep the current states in the output.
currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
bestPossibleStateOutput.setState(resource, partition, instanceName, "OFFLINE");
// should be kept unchanged:
expectedResult.setState(resource, partition, instanceName, "OFFLINE");
} else if (resource.endsWith("3")) {
// One unresolved error should not prevent recovery balance
bestPossibleStateOutput.setState(resource, partition, instanceName, "ONLINE");
if (p == 0) {
if (r == 0) {
currentStateOutput.setCurrentState(resource, partition, instanceName, "ERROR");
bestPossibleStateOutput.setState(resource, partition, instanceName, "ERROR");
// This partition is still ERROR
expectedResult.setState(resource, partition, instanceName, "ERROR");
} else {
currentStateOutput.setCurrentState(resource, partition, instanceName, "OFFLINE");
// Recovery balance
expectedResult.setState(resource, partition, instanceName, "ONLINE");
}
} else {
currentStateOutput.setCurrentState(resource, partition, instanceName, "ONLINE");
currentStateOutput.setCurrentState(resource, partition, instanceName + "-1", "OFFLINE");
// load balance is throttled, so keep all current states
expectedResult.setState(resource, partition, instanceName, "ONLINE");
expectedResult.setState(resource, partition, instanceName + "-1", "OFFLINE");
}
}
}
}
bestPossibleStateOutput.setPreferenceLists(resource, partitionMap);
}
event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
runStage(event, new ReadClusterDataStage());
// Keep updating the current state.
for (int i = 0; i < resourceSet.size(); i++) {
runStage(event, new IntermediateStateCalcStage());
}
IntermediateStateOutput output = event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());
for (String resource : resourceSet) {
// Note: Assert.assertEquals won't work here. If "actual" is an empty map, it won't compare anything (see the explicit comparison sketch after this method).
Assert.assertTrue(output.getPartitionStateMap(resource).getStateMap().equals(expectedResult.getPartitionStateMap(resource).getStateMap()));
}
}
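The comment before the final assertion is worth expanding: the test avoids Assert.assertEquals on the two maps because an empty "actual" map could slip through, and falls back to a bare assertTrue. Below is a sketch of a more talkative comparison helper; the Map<Partition, Map<String, String>> shape is inferred from how getStateMap() is used above, so treat it as an assumption.
import java.util.Map;
import org.apache.helix.model.Partition;
import org.testng.Assert;

public class StateMapAssertions {
  // Compare expected vs. actual partition state maps entry by entry, so a failure reports
  // which partition diverged instead of a bare assertTrue(false).
  static void assertStateMapsEqual(Map<Partition, Map<String, String>> expected,
      Map<Partition, Map<String, String>> actual) {
    Assert.assertEquals(actual.keySet(), expected.keySet(), "partition sets differ");
    for (Map.Entry<Partition, Map<String, String>> entry : expected.entrySet()) {
      Assert.assertEquals(actual.get(entry.getKey()), entry.getValue(),
          "state map differs for " + entry.getKey().getPartitionName());
    }
  }
}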
Use of org.apache.helix.model.Partition in project helix by apache.
From the class TestMessageThrottleStage, method testMsgThrottleBasic.
@Test
public void testMsgThrottleBasic() throws Exception {
String clusterName = "CLUSTER_" + _className + "_basic";
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
HelixManager manager = new DummyClusterManager(clusterName, accessor);
// ideal state: node0 is MASTER, node1 is SLAVE
// replica=2 means 1 master and 1 slave
setupIdealState(clusterName, new int[] { 0, 1 }, new String[] { "TestDB" }, 1, 2);
setupLiveInstances(clusterName, new int[] { 0, 1 });
setupStateModel(clusterName);
ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
ClusterDataCache cache = new ClusterDataCache(clusterName);
event.addAttribute(AttributeName.helixmanager.name(), manager);
event.addAttribute(AttributeName.ClusterDataCache.name(), cache);
MessageThrottleStage throttleStage = new MessageThrottleStage();
try {
runStage(event, throttleStage);
Assert.fail("Should throw exception since DATA_CACHE is null");
} catch (Exception e) {
// OK
}
Pipeline dataRefresh = new Pipeline();
dataRefresh.addStage(new ReadClusterDataStage());
runPipeline(event, dataRefresh);
try {
runStage(event, throttleStage);
Assert.fail("Should throw exception since RESOURCE is null");
} catch (Exception e) {
// OK
}
runStage(event, new ResourceComputationStage());
try {
runStage(event, throttleStage);
Assert.fail("Should throw exception since MESSAGE_SELECT is null");
} catch (Exception e) {
// OK
}
MessageSelectionStageOutput msgSelectOutput = new MessageSelectionStageOutput();
List<Message> selectMessages = new ArrayList<Message>();
Message msg = createMessage(MessageType.STATE_TRANSITION, "msgId-001", "OFFLINE", "SLAVE", "TestDB", "localhost_0");
selectMessages.add(msg);
msgSelectOutput.addMessages("TestDB", new Partition("TestDB_0"), selectMessages);
event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), msgSelectOutput);
runStage(event, throttleStage);
MessageThrottleStageOutput msgThrottleOutput = event.getAttribute(AttributeName.MESSAGES_THROTTLE.name());
Assert.assertEquals(msgThrottleOutput.getMessages("TestDB", new Partition("TestDB_0")).size(), 1);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
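createMessage here is a helper on the test base class and is not reproduced in this listing. A plausible sketch of it follows, assuming the standard setters on org.apache.helix.model.Message; the real helper may set additional fields such as the partition name, session ids, and state model definition.
import org.apache.helix.model.Message;
import org.apache.helix.model.Message.MessageType;

public class MessageFactorySketch {
  // Build a state-transition message of the shape fed into MessageSelectionStageOutput above.
  static Message createMessage(MessageType type, String msgId, String fromState, String toState,
      String resourceName, String tgtName) {
    Message msg = new Message(type, msgId);
    msg.setFromState(fromState);
    msg.setToState(toState);
    msg.setResourceName(resourceName);
    msg.setTgtName(tgtName);
    // Partition name, source/target session ids and the state model definition
    // are omitted from this sketch.
    return msg;
  }
}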
Use of org.apache.helix.model.Partition in project helix by apache.
From the class TestMessageThrottleStage, method testMsgThrottleConstraints.
@Test()
public void testMsgThrottleConstraints() throws Exception {
String clusterName = "CLUSTER_" + _className + "_constraints";
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
HelixManager manager = new DummyClusterManager(clusterName, accessor);
// ideal state: node0 is MASTER, node1 is SLAVE
// replica=2 means 1 master and 1 slave
setupIdealState(clusterName, new int[] { 0, 1 }, new String[] { "TestDB" }, 1, 2);
setupLiveInstances(clusterName, new int[] { 0, 1 });
setupStateModel(clusterName);
// setup constraints
ZNRecord record = new ZNRecord(ConstraintType.MESSAGE_CONSTRAINT.toString());
// constraint0:
// "MESSAGE_TYPE=STATE_TRANSITION,CONSTRAINT_VALUE=ANY"
record.setMapField("constraint0", new TreeMap<String, String>());
record.getMapField("constraint0").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint0").put("CONSTRAINT_VALUE", "ANY");
ConstraintItem constraint0 = new ConstraintItem(record.getMapField("constraint0"));
// constraint1:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,CONSTRAINT_VALUE=ANY"
record.setMapField("constraint1", new TreeMap<String, String>());
record.getMapField("constraint1").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint1").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint1").put("CONSTRAINT_VALUE", "50");
ConstraintItem constraint1 = new ConstraintItem(record.getMapField("constraint1"));
// constraint2:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=.*,RESOURCE=TestDB,CONSTRAINT_VALUE=2";
record.setMapField("constraint2", new TreeMap<String, String>());
record.getMapField("constraint2").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint2").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint2").put("INSTANCE", ".*");
record.getMapField("constraint2").put("RESOURCE", "TestDB");
record.getMapField("constraint2").put("CONSTRAINT_VALUE", "2");
ConstraintItem constraint2 = new ConstraintItem(record.getMapField("constraint2"));
// constraint3:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=localhost_12918,RESOURCE=.*,CONSTRAINT_VALUE=1";
record.setMapField("constraint3", new TreeMap<String, String>());
record.getMapField("constraint3").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint3").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint3").put("INSTANCE", "localhost_1");
record.getMapField("constraint3").put("RESOURCE", ".*");
record.getMapField("constraint3").put("CONSTRAINT_VALUE", "1");
ConstraintItem constraint3 = new ConstraintItem(record.getMapField("constraint3"));
// constraint4:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=.*,RESOURCE=.*,CONSTRAINT_VALUE=10"
record.setMapField("constraint4", new TreeMap<String, String>());
record.getMapField("constraint4").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint4").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint4").put("INSTANCE", ".*");
record.getMapField("constraint4").put("RESOURCE", ".*");
record.getMapField("constraint4").put("CONSTRAINT_VALUE", "10");
ConstraintItem constraint4 = new ConstraintItem(record.getMapField("constraint4"));
// constraint5:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=localhost_12918,RESOURCE=TestDB,CONSTRAINT_VALUE=5"
record.setMapField("constraint5", new TreeMap<String, String>());
record.getMapField("constraint5").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint5").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint5").put("INSTANCE", "localhost_0");
record.getMapField("constraint5").put("RESOURCE", "TestDB");
record.getMapField("constraint5").put("CONSTRAINT_VALUE", "3");
ConstraintItem constraint5 = new ConstraintItem(record.getMapField("constraint5"));
Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.constraint(ConstraintType.MESSAGE_CONSTRAINT.toString()), new ClusterConstraints(record));
// ClusterConstraints constraint =
// accessor.getProperty(ClusterConstraints.class,
// PropertyType.CONFIGS,
// ConfigScopeProperty.CONSTRAINT.toString(),
// ConstraintType.MESSAGE_CONSTRAINT.toString());
ClusterConstraints constraint = accessor.getProperty(keyBuilder.constraint(ConstraintType.MESSAGE_CONSTRAINT.toString()));
MessageThrottleStage throttleStage = new MessageThrottleStage();
// test constraintSelection
// message1: hits constraintSelection rule1 and rule2
Message msg1 = createMessage(MessageType.STATE_TRANSITION, "msgId-001", "OFFLINE", "SLAVE", "TestDB", "localhost_0");
Map<ConstraintAttribute, String> msgAttr = ClusterConstraints.toConstraintAttributes(msg1);
Set<ConstraintItem> matches = constraint.match(msgAttr);
System.out.println(msg1 + " matches(" + matches.size() + "): " + matches);
Assert.assertEquals(matches.size(), 5);
Assert.assertTrue(containsConstraint(matches, constraint0));
Assert.assertTrue(containsConstraint(matches, constraint1));
Assert.assertTrue(containsConstraint(matches, constraint2));
Assert.assertTrue(containsConstraint(matches, constraint4));
Assert.assertTrue(containsConstraint(matches, constraint5));
matches = throttleStage.selectConstraints(matches, msgAttr);
System.out.println(msg1 + " matches(" + matches.size() + "): " + matches);
Assert.assertEquals(matches.size(), 2);
Assert.assertTrue(containsConstraint(matches, constraint1));
Assert.assertTrue(containsConstraint(matches, constraint5));
// message2: hits constraintSelection rule1, rule2, and rule3
Message msg2 = createMessage(MessageType.STATE_TRANSITION, "msgId-002", "OFFLINE", "SLAVE", "TestDB", "localhost_1");
msgAttr = ClusterConstraints.toConstraintAttributes(msg2);
matches = constraint.match(msgAttr);
System.out.println(msg2 + " matches(" + matches.size() + "): " + matches);
Assert.assertEquals(matches.size(), 5);
Assert.assertTrue(containsConstraint(matches, constraint0));
Assert.assertTrue(containsConstraint(matches, constraint1));
Assert.assertTrue(containsConstraint(matches, constraint2));
Assert.assertTrue(containsConstraint(matches, constraint3));
Assert.assertTrue(containsConstraint(matches, constraint4));
matches = throttleStage.selectConstraints(matches, msgAttr);
System.out.println(msg2 + " matches(" + matches.size() + "): " + matches);
Assert.assertEquals(matches.size(), 2);
Assert.assertTrue(containsConstraint(matches, constraint1));
Assert.assertTrue(containsConstraint(matches, constraint3));
// test messageThrottleStage
ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
ClusterDataCache cache = new ClusterDataCache(clusterName);
event.addAttribute(AttributeName.helixmanager.name(), manager);
event.addAttribute(AttributeName.ClusterDataCache.name(), cache);
Pipeline dataRefresh = new Pipeline();
dataRefresh.addStage(new ReadClusterDataStage());
runPipeline(event, dataRefresh);
runStage(event, new ResourceComputationStage());
MessageSelectionStageOutput msgSelectOutput = new MessageSelectionStageOutput();
Message msg3 = createMessage(MessageType.STATE_TRANSITION, "msgId-003", "OFFLINE", "SLAVE", "TestDB", "localhost_0");
Message msg4 = createMessage(MessageType.STATE_TRANSITION, "msgId-004", "OFFLINE", "SLAVE", "TestDB", "localhost_0");
Message msg5 = createMessage(MessageType.STATE_TRANSITION, "msgId-005", "OFFLINE", "SLAVE", "TestDB", "localhost_0");
Message msg6 = createMessage(MessageType.STATE_TRANSITION, "msgId-006", "OFFLINE", "SLAVE", "TestDB", "localhost_1");
List<Message> selectMessages = new ArrayList<Message>();
selectMessages.add(msg1);
selectMessages.add(msg2);
selectMessages.add(msg3);
selectMessages.add(msg4);
// should be throttled
selectMessages.add(msg5);
// should be throttled
selectMessages.add(msg6);
msgSelectOutput.addMessages("TestDB", new Partition("TestDB_0"), selectMessages);
event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), msgSelectOutput);
runStage(event, throttleStage);
MessageThrottleStageOutput msgThrottleOutput = event.getAttribute(AttributeName.MESSAGES_THROTTLE.name());
List<Message> throttleMessages = msgThrottleOutput.getMessages("TestDB", new Partition("TestDB_0"));
Assert.assertEquals(throttleMessages.size(), 4);
Assert.assertTrue(throttleMessages.contains(msg1));
Assert.assertTrue(throttleMessages.contains(msg2));
Assert.assertTrue(throttleMessages.contains(msg3));
Assert.assertTrue(throttleMessages.contains(msg4));
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
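containsConstraint is likewise a private helper of the test class that this listing does not show. Unless ConstraintItem overrides equals(), matches.contains(constraintX) would not find the freshly built items above; the following is a sketch of what such a helper might look like, comparing the items through their string form, which is an assumption about how equivalence is decided.
import java.util.Set;
import org.apache.helix.model.ConstraintItem;

public class ConstraintAssertions {
  // Return true if the matched set contains a constraint equivalent to the expected one.
  static boolean containsConstraint(Set<ConstraintItem> matches, ConstraintItem expected) {
    for (ConstraintItem item : matches) {
      if (item.toString().equals(expected.toString())) {
        return true;
      }
    }
    return false;
  }
}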
Use of org.apache.helix.model.Partition in project helix by apache.
From the class TestRebalancePipeline, method testChangeIdealStateWithPendingMsg.
@Test
public void testChangeIdealStateWithPendingMsg() {
String clusterName = "CLUSTER_" + _className + "_pending";
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
HelixManager manager = new DummyClusterManager(clusterName, accessor);
ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
event.addAttribute(AttributeName.helixmanager.name(), manager);
ClusterDataCache cache = new ClusterDataCache();
event.addAttribute(AttributeName.ClusterDataCache.name(), cache);
refreshClusterConfig(clusterName, accessor);
final String resourceName = "testResource_pending";
String[] resourceGroups = new String[] { resourceName };
// ideal state: node0 is MASTER
// replica=1 means 1 master and no slave
setupIdealState(clusterName, new int[] { 0 }, resourceGroups, 1, 1);
setupLiveInstances(clusterName, new int[] { 0 });
setupStateModel(clusterName);
// cluster data cache refresh pipeline
Pipeline dataRefresh = new Pipeline();
dataRefresh.addStage(new ReadClusterDataStage());
// rebalance pipeline
Pipeline rebalancePipeline = new Pipeline();
rebalancePipeline.addStage(new ResourceComputationStage());
rebalancePipeline.addStage(new CurrentStateComputationStage());
rebalancePipeline.addStage(new BestPossibleStateCalcStage());
rebalancePipeline.addStage(new IntermediateStateCalcStage());
rebalancePipeline.addStage(new MessageGenerationPhase());
rebalancePipeline.addStage(new MessageSelectionStage());
rebalancePipeline.addStage(new MessageThrottleStage());
rebalancePipeline.addStage(new TaskAssignmentStage());
// round1: set node0 currentState to OFFLINE
setCurrentState(clusterName, "localhost_0", resourceName, resourceName + "_0", "session_0", "OFFLINE");
runPipeline(event, dataRefresh);
runPipeline(event, rebalancePipeline);
MessageSelectionStageOutput msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
List<Message> messages = msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
Assert.assertEquals(messages.size(), 1, "Should output 1 message: OFFLINE->SLAVE for localhost_0");
Message message = messages.get(0);
Assert.assertEquals(message.getFromState(), "OFFLINE");
Assert.assertEquals(message.getToState(), "SLAVE");
Assert.assertEquals(message.getTgtName(), "localhost_0");
// round2: drop the resource but keep the pending message;
// make sure the controller does not send OFFLINE->DROPPED until OFFLINE->SLAVE is done
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.dropResource(clusterName, resourceName);
List<IdealState> idealStates = accessor.getChildValues(accessor.keyBuilder().idealStates());
cache.setIdealStates(idealStates);
runPipeline(event, dataRefresh);
cache = event.getAttribute(AttributeName.ClusterDataCache.name());
cache.setClusterConfig(new ClusterConfig(clusterName));
runPipeline(event, rebalancePipeline);
msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
Assert.assertEquals(messages.size(), 0, "Should not output any message: OFFLINE->DROPPED must wait until OFFLINE->SLAVE is done");
// round3: remove O->S message for localhost_0, localhost_0 still in OFFLINE
// controller should now send O->DROPPED to localhost_0
Builder keyBuilder = accessor.keyBuilder();
List<String> msgIds = accessor.getChildNames(keyBuilder.messages("localhost_0"));
accessor.removeProperty(keyBuilder.message("localhost_0", msgIds.get(0)));
runPipeline(event, dataRefresh);
runPipeline(event, rebalancePipeline);
msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
Assert.assertEquals(messages.size(), 1, "Should output 1 message: OFFLINE->DROPPED for localhost_0");
message = messages.get(0);
Assert.assertEquals(message.getFromState(), "OFFLINE");
Assert.assertEquals(message.getToState(), "DROPPED");
Assert.assertEquals(message.getTgtName(), "localhost_0");
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
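setCurrentState is another base-class helper not shown in this listing. Below is a hedged sketch of what it plausibly does, writing a CurrentState record for the instance and session through the data accessor; the accessor-based signature and the MasterSlave state model reference are assumptions, since the helper used above takes a cluster name instead.
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.model.CurrentState;

public class CurrentStateWriterSketch {
  // Persist a single partition's current state under the instance's session, roughly what
  // setCurrentState(clusterName, instance, resource, partition, session, state) appears to do.
  static void setCurrentState(HelixDataAccessor accessor, String instance, String resource,
      String partition, String sessionId, String state) {
    CurrentState currentState = new CurrentState(resource);
    currentState.setSessionId(sessionId);
    currentState.setStateModelDefRef("MasterSlave"); // assumed state model for this test
    currentState.setState(partition, state);
    Builder keyBuilder = accessor.keyBuilder();
    accessor.setProperty(keyBuilder.currentState(instance, sessionId, resource), currentState);
  }
}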