use of org.apache.helix.manager.zk.ZKHelixDataAccessor in project helix by apache.
the class TestHelper method verifyEmptyCurStateAndExtView.
public static boolean verifyEmptyCurStateAndExtView(String clusterName, String resourceName, Set<String> instanceNames, String zkAddr) {
ZkClient zkClient = new ZkClient(zkAddr);
zkClient.setZkSerializer(new ZNRecordSerializer());
try {
ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
Builder keyBuilder = accessor.keyBuilder();
for (String instanceName : instanceNames) {
List<String> sessionIds = accessor.getChildNames(keyBuilder.sessions(instanceName));
for (String sessionId : sessionIds) {
CurrentState curState = accessor.getProperty(keyBuilder.currentState(instanceName, sessionId, resourceName));
if (curState != null && curState.getRecord().getMapFields().size() != 0) {
return false;
}
}
ExternalView extView = accessor.getProperty(keyBuilder.externalView(resourceName));
if (extView != null && extView.getRecord().getMapFields().size() != 0) {
return false;
}
}
return true;
} finally {
zkClient.close();
}
}
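A minimal usage sketch (not taken from the Helix sources above): a test can poll this helper until the cluster converges, for example after dropping a resource. The wrapper name, timeout, and poll interval below are illustrative assumptions.
private static void waitForEmptyCurStateAndExtView(String clusterName, String resourceName, Set<String> instanceNames, String zkAddr) throws Exception {
// Hypothetical wrapper around the helper above; assumes org.testng.Assert and java.util.Set are imported.
long deadline = System.currentTimeMillis() + 30 * 1000L;
while (System.currentTimeMillis() < deadline) {
if (TestHelper.verifyEmptyCurStateAndExtView(clusterName, resourceName, instanceNames, zkAddr)) {
return;
}
Thread.sleep(500);
}
Assert.fail("current states and external view were not empty within the timeout");
}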
use of org.apache.helix.manager.zk.ZKHelixDataAccessor in project helix by apache.
the class TestMessageThrottleStage method testMsgThrottleBasic.
@Test
public void testMsgThrottleBasic() throws Exception {
String clusterName = "CLUSTER_" + _className + "_basic";
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
HelixManager manager = new DummyClusterManager(clusterName, accessor);
// ideal state: node0 is MASTER, node1 is SLAVE
// replica=2 means 1 master and 1 slave
setupIdealState(clusterName, new int[] { 0, 1 }, new String[] { "TestDB" }, 1, 2);
setupLiveInstances(clusterName, new int[] { 0, 1 });
setupStateModel(clusterName);
ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
ClusterDataCache cache = new ClusterDataCache(clusterName);
event.addAttribute(AttributeName.helixmanager.name(), manager);
event.addAttribute(AttributeName.ClusterDataCache.name(), cache);
MessageThrottleStage throttleStage = new MessageThrottleStage();
try {
runStage(event, throttleStage);
Assert.fail("Should throw exception since DATA_CACHE is null");
} catch (Exception e) {
// OK
}
Pipeline dataRefresh = new Pipeline();
dataRefresh.addStage(new ReadClusterDataStage());
runPipeline(event, dataRefresh);
try {
runStage(event, throttleStage);
Assert.fail("Should throw exception since RESOURCE is null");
} catch (Exception e) {
// OK
}
runStage(event, new ResourceComputationStage());
try {
runStage(event, throttleStage);
Assert.fail("Should throw exception since MESSAGE_SELECT is null");
} catch (Exception e) {
// OK
}
MessageSelectionStageOutput msgSelectOutput = new MessageSelectionStageOutput();
List<Message> selectMessages = new ArrayList<Message>();
Message msg = createMessage(MessageType.STATE_TRANSITION, "msgId-001", "OFFLINE", "SLAVE", "TestDB", "localhost_0");
selectMessages.add(msg);
msgSelectOutput.addMessages("TestDB", new Partition("TestDB_0"), selectMessages);
event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), msgSelectOutput);
runStage(event, throttleStage);
MessageThrottleStageOutput msgThrottleOutput = event.getAttribute(AttributeName.MESSAGES_THROTTLE.name());
Assert.assertEquals(msgThrottleOutput.getMessages("TestDB", new Partition("TestDB_0")).size(), 1);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
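The createMessage helper used above is not shown in these snippets; the following is a plausible reconstruction using the public Message API, offered only as a sketch. The target session id, partition name, and state model name are assumptions.
private static Message createMessage(MessageType type, String msgId, String fromState, String toState, String resourceName, String tgtName) {
// Sketch only; the real helper in the Helix test code may differ.
Message msg = new Message(type, msgId);
msg.setFromState(fromState);
msg.setToState(toState);
msg.setResourceName(resourceName);
msg.setPartitionName(resourceName + "_0"); // assumed partition name
msg.setTgtName(tgtName);
msg.setTgtSessionId("*"); // assumed: any target session
msg.setStateModelDef("MasterSlave"); // assumed state model for the MASTER/SLAVE transitions above
return msg;
}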
use of org.apache.helix.manager.zk.ZKHelixDataAccessor in project helix by apache.
the class TestMessageThrottleStage method testMsgThrottleConstraints.
@Test()
public void testMsgThrottleConstraints() throws Exception {
String clusterName = "CLUSTER_" + _className + "_constraints";
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
HelixManager manager = new DummyClusterManager(clusterName, accessor);
// ideal state: node0 is MASTER, node1 is SLAVE
// replica=2 means 1 master and 1 slave
setupIdealState(clusterName, new int[] { 0, 1 }, new String[] { "TestDB" }, 1, 2);
setupLiveInstances(clusterName, new int[] { 0, 1 });
setupStateModel(clusterName);
// setup constraints
ZNRecord record = new ZNRecord(ConstraintType.MESSAGE_CONSTRAINT.toString());
// constraint0:
// "MESSAGE_TYPE=STATE_TRANSITION,CONSTRAINT_VALUE=ANY"
record.setMapField("constraint0", new TreeMap<String, String>());
record.getMapField("constraint0").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint0").put("CONSTRAINT_VALUE", "ANY");
ConstraintItem constraint0 = new ConstraintItem(record.getMapField("constraint0"));
// constraint1:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,CONSTRAINT_VALUE=ANY"
record.setMapField("constraint1", new TreeMap<String, String>());
record.getMapField("constraint1").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint1").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint1").put("CONSTRAINT_VALUE", "50");
ConstraintItem constraint1 = new ConstraintItem(record.getMapField("constraint1"));
// constraint2:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=.*,RESOURCE=TestDB,CONSTRAINT_VALUE=2";
record.setMapField("constraint2", new TreeMap<String, String>());
record.getMapField("constraint2").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint2").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint2").put("INSTANCE", ".*");
record.getMapField("constraint2").put("RESOURCE", "TestDB");
record.getMapField("constraint2").put("CONSTRAINT_VALUE", "2");
ConstraintItem constraint2 = new ConstraintItem(record.getMapField("constraint2"));
// constraint3:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=localhost_12918,RESOURCE=.*,CONSTRAINT_VALUE=1";
record.setMapField("constraint3", new TreeMap<String, String>());
record.getMapField("constraint3").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint3").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint3").put("INSTANCE", "localhost_1");
record.getMapField("constraint3").put("RESOURCE", ".*");
record.getMapField("constraint3").put("CONSTRAINT_VALUE", "1");
ConstraintItem constraint3 = new ConstraintItem(record.getMapField("constraint3"));
// constraint4:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=.*,RESOURCE=.*,CONSTRAINT_VALUE=10"
record.setMapField("constraint4", new TreeMap<String, String>());
record.getMapField("constraint4").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint4").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint4").put("INSTANCE", ".*");
record.getMapField("constraint4").put("RESOURCE", ".*");
record.getMapField("constraint4").put("CONSTRAINT_VALUE", "10");
ConstraintItem constraint4 = new ConstraintItem(record.getMapField("constraint4"));
// constraint5:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=localhost_12918,RESOURCE=TestDB,CONSTRAINT_VALUE=5"
record.setMapField("constraint5", new TreeMap<String, String>());
record.getMapField("constraint5").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint5").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint5").put("INSTANCE", "localhost_0");
record.getMapField("constraint5").put("RESOURCE", "TestDB");
record.getMapField("constraint5").put("CONSTRAINT_VALUE", "3");
ConstraintItem constraint5 = new ConstraintItem(record.getMapField("constraint5"));
Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.constraint(ConstraintType.MESSAGE_CONSTRAINT.toString()), new ClusterConstraints(record));
// ClusterConstraints constraint =
// accessor.getProperty(ClusterConstraints.class,
// PropertyType.CONFIGS,
// ConfigScopeProperty.CONSTRAINT.toString(),
// ConstraintType.MESSAGE_CONSTRAINT.toString());
ClusterConstraints constraint = accessor.getProperty(keyBuilder.constraint(ConstraintType.MESSAGE_CONSTRAINT.toString()));
MessageThrottleStage throttleStage = new MessageThrottleStage();
// test constraintSelection
// message1: hits constraintSelection rule1 and rule2
Message msg1 = createMessage(MessageType.STATE_TRANSITION, "msgId-001", "OFFLINE", "SLAVE", "TestDB", "localhost_0");
Map<ConstraintAttribute, String> msgAttr = ClusterConstraints.toConstraintAttributes(msg1);
Set<ConstraintItem> matches = constraint.match(msgAttr);
System.out.println(msg1 + " matches(" + matches.size() + "): " + matches);
Assert.assertEquals(matches.size(), 5);
Assert.assertTrue(containsConstraint(matches, constraint0));
Assert.assertTrue(containsConstraint(matches, constraint1));
Assert.assertTrue(containsConstraint(matches, constraint2));
Assert.assertTrue(containsConstraint(matches, constraint4));
Assert.assertTrue(containsConstraint(matches, constraint5));
matches = throttleStage.selectConstraints(matches, msgAttr);
System.out.println(msg1 + " matches(" + matches.size() + "): " + matches);
Assert.assertEquals(matches.size(), 2);
Assert.assertTrue(containsConstraint(matches, constraint1));
Assert.assertTrue(containsConstraint(matches, constraint5));
// message2: hits constraintSelection rule1, rule2, and rule3
Message msg2 = createMessage(MessageType.STATE_TRANSITION, "msgId-002", "OFFLINE", "SLAVE", "TestDB", "localhost_1");
msgAttr = ClusterConstraints.toConstraintAttributes(msg2);
matches = constraint.match(msgAttr);
System.out.println(msg2 + " matches(" + matches.size() + "): " + matches);
Assert.assertEquals(matches.size(), 5);
Assert.assertTrue(containsConstraint(matches, constraint0));
Assert.assertTrue(containsConstraint(matches, constraint1));
Assert.assertTrue(containsConstraint(matches, constraint2));
Assert.assertTrue(containsConstraint(matches, constraint3));
Assert.assertTrue(containsConstraint(matches, constraint4));
matches = throttleStage.selectConstraints(matches, msgAttr);
System.out.println(msg2 + " matches(" + matches.size() + "): " + matches);
Assert.assertEquals(matches.size(), 2);
Assert.assertTrue(containsConstraint(matches, constraint1));
Assert.assertTrue(containsConstraint(matches, constraint3));
// test messageThrottleStage
ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
ClusterDataCache cache = new ClusterDataCache(clusterName);
event.addAttribute(AttributeName.helixmanager.name(), manager);
event.addAttribute(AttributeName.ClusterDataCache.name(), cache);
Pipeline dataRefresh = new Pipeline();
dataRefresh.addStage(new ReadClusterDataStage());
runPipeline(event, dataRefresh);
runStage(event, new ResourceComputationStage());
MessageSelectionStageOutput msgSelectOutput = new MessageSelectionStageOutput();
Message msg3 = createMessage(MessageType.STATE_TRANSITION, "msgId-003", "OFFLINE", "SLAVE", "TestDB", "localhost_0");
Message msg4 = createMessage(MessageType.STATE_TRANSITION, "msgId-004", "OFFLINE", "SLAVE", "TestDB", "localhost_0");
Message msg5 = createMessage(MessageType.STATE_TRANSITION, "msgId-005", "OFFLINE", "SLAVE", "TestDB", "localhost_0");
Message msg6 = createMessage(MessageType.STATE_TRANSITION, "msgId-006", "OFFLINE", "SLAVE", "TestDB", "localhost_1");
List<Message> selectMessages = new ArrayList<Message>();
selectMessages.add(msg1);
selectMessages.add(msg2);
selectMessages.add(msg3);
selectMessages.add(msg4);
// should be throttled
selectMessages.add(msg5);
// should be throttled
selectMessages.add(msg6);
msgSelectOutput.addMessages("TestDB", new Partition("TestDB_0"), selectMessages);
event.addAttribute(AttributeName.MESSAGES_SELECTED.name(), msgSelectOutput);
runStage(event, throttleStage);
MessageThrottleStageOutput msgThrottleOutput = event.getAttribute(AttributeName.MESSAGES_THROTTLE.name());
List<Message> throttleMessages = msgThrottleOutput.getMessages("TestDB", new Partition("TestDB_0"));
Assert.assertEquals(throttleMessages.size(), 4);
Assert.assertTrue(throttleMessages.contains(msg1));
Assert.assertTrue(throttleMessages.contains(msg2));
Assert.assertTrue(throttleMessages.contains(msg3));
Assert.assertTrue(throttleMessages.contains(msg4));
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
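Instead of assembling the constraint ZNRecord by hand as above, message constraints can also be registered through the HelixAdmin API. The sketch below mirrors constraint2 and assumes the setConstraint method and ConstraintItemBuilder available in recent Helix releases; it is illustrative, not part of the test above.
// Sketch: register the equivalent of constraint2 via the admin API (assumed available).
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
ConstraintItemBuilder itemBuilder = new ConstraintItemBuilder();
itemBuilder.addConstraintAttribute(ConstraintAttribute.MESSAGE_TYPE.toString(), "STATE_TRANSITION");
itemBuilder.addConstraintAttribute(ConstraintAttribute.TRANSITION.toString(), "OFFLINE-SLAVE");
itemBuilder.addConstraintAttribute(ConstraintAttribute.INSTANCE.toString(), ".*");
itemBuilder.addConstraintAttribute(ConstraintAttribute.RESOURCE.toString(), "TestDB");
itemBuilder.addConstraintAttribute(ConstraintAttribute.CONSTRAINT_VALUE.toString(), "2");
admin.setConstraint(clusterName, ConstraintType.MESSAGE_CONSTRAINT, "constraint2", itemBuilder.build());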
use of org.apache.helix.manager.zk.ZKHelixDataAccessor in project helix by apache.
the class TestRebalancePipeline method testChangeIdealStateWithPendingMsg.
@Test
public void testChangeIdealStateWithPendingMsg() {
String clusterName = "CLUSTER_" + _className + "_pending";
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
HelixManager manager = new DummyClusterManager(clusterName, accessor);
ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
event.addAttribute(AttributeName.helixmanager.name(), manager);
ClusterDataCache cache = new ClusterDataCache();
event.addAttribute(AttributeName.ClusterDataCache.name(), cache);
refreshClusterConfig(clusterName, accessor);
final String resourceName = "testResource_pending";
String[] resourceGroups = new String[] { resourceName };
// ideal state: node0 is MASTER
// replica=1 means 1 master only
setupIdealState(clusterName, new int[] { 0 }, resourceGroups, 1, 1);
setupLiveInstances(clusterName, new int[] { 0 });
setupStateModel(clusterName);
// cluster data cache refresh pipeline
Pipeline dataRefresh = new Pipeline();
dataRefresh.addStage(new ReadClusterDataStage());
// rebalance pipeline
Pipeline rebalancePipeline = new Pipeline();
rebalancePipeline.addStage(new ResourceComputationStage());
rebalancePipeline.addStage(new CurrentStateComputationStage());
rebalancePipeline.addStage(new BestPossibleStateCalcStage());
rebalancePipeline.addStage(new IntermediateStateCalcStage());
rebalancePipeline.addStage(new MessageGenerationPhase());
rebalancePipeline.addStage(new MessageSelectionStage());
rebalancePipeline.addStage(new MessageThrottleStage());
rebalancePipeline.addStage(new TaskAssignmentStage());
// round1: set node0 currentState to OFFLINE and node1 currentState to SLAVE
setCurrentState(clusterName, "localhost_0", resourceName, resourceName + "_0", "session_0", "OFFLINE");
runPipeline(event, dataRefresh);
runPipeline(event, rebalancePipeline);
MessageSelectionStageOutput msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
List<Message> messages = msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
Assert.assertEquals(messages.size(), 1, "Should output 1 message: OFFLINE-SLAVE for node0");
Message message = messages.get(0);
Assert.assertEquals(message.getFromState(), "OFFLINE");
Assert.assertEquals(message.getToState(), "SLAVE");
Assert.assertEquals(message.getTgtName(), "localhost_0");
// round2: drop the resource but keep the pending message;
// the controller should not send OFFLINE->DROPPED until the OFFLINE->SLAVE transition is done
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.dropResource(clusterName, resourceName);
List<IdealState> idealStates = accessor.getChildValues(accessor.keyBuilder().idealStates());
cache.setIdealStates(idealStates);
runPipeline(event, dataRefresh);
cache = event.getAttribute(AttributeName.ClusterDataCache.name());
cache.setClusterConfig(new ClusterConfig(clusterName));
runPipeline(event, rebalancePipeline);
msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
Assert.assertEquals(messages.size(), 0, "Should not output only 1 message: OFFLINE->DROPPED for localhost_0");
// round3: remove O->S message for localhost_0, localhost_0 still in OFFLINE
// controller should now send O->DROPPED to localhost_0
Builder keyBuilder = accessor.keyBuilder();
List<String> msgIds = accessor.getChildNames(keyBuilder.messages("localhost_0"));
accessor.removeProperty(keyBuilder.message("localhost_0", msgIds.get(0)));
runPipeline(event, dataRefresh);
runPipeline(event, rebalancePipeline);
msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
Assert.assertEquals(messages.size(), 1, "Should output 1 message: OFFLINE->DROPPED for localhost_0");
message = messages.get(0);
Assert.assertEquals(message.getFromState(), "OFFLINE");
Assert.assertEquals(message.getToState(), "DROPPED");
Assert.assertEquals(message.getTgtName(), "localhost_0");
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
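Before removing the pending message as round3 does, the same accessor can be used to inspect what is queued for an instance. A small illustrative sketch, reusing the accessor and keyBuilder calls that already appear above (the instance name is the one from this test):
// Illustrative only: list the pending messages for localhost_0.
List<Message> pendingMsgs = accessor.getChildValues(keyBuilder.messages("localhost_0"));
for (Message m : pendingMsgs) {
System.out.println(m.getMsgId() + ": " + m.getFromState() + "->" + m.getToState() + " on " + m.getTgtName());
}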
use of org.apache.helix.manager.zk.ZKHelixDataAccessor in project helix by apache.
the class TestRebalancePipeline method testMasterXfer.
@Test
public void testMasterXfer() {
String clusterName = "CLUSTER_" + _className + "_xfer";
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
HelixManager manager = new DummyClusterManager(clusterName, accessor);
ClusterEvent event = new ClusterEvent(ClusterEventType.Unknown);
event.addAttribute(AttributeName.helixmanager.name(), manager);
refreshClusterConfig(clusterName, accessor);
final String resourceName = "testResource_xfer";
String[] resourceGroups = new String[] { resourceName };
// ideal state: node0 is MASTER, node1 is SLAVE
// replica=2 means 1 master and 1 slave
setupIdealState(clusterName, new int[] { 0, 1 }, resourceGroups, 1, 2);
setupLiveInstances(clusterName, new int[] { 1 });
setupStateModel(clusterName);
// cluster data cache refresh pipeline
Pipeline dataRefresh = new Pipeline();
dataRefresh.addStage(new ReadClusterDataStage());
// rebalance pipeline
Pipeline rebalancePipeline = new Pipeline();
rebalancePipeline.addStage(new ResourceComputationStage());
rebalancePipeline.addStage(new CurrentStateComputationStage());
rebalancePipeline.addStage(new BestPossibleStateCalcStage());
rebalancePipeline.addStage(new IntermediateStateCalcStage());
rebalancePipeline.addStage(new MessageGenerationPhase());
rebalancePipeline.addStage(new MessageSelectionStage());
rebalancePipeline.addStage(new MessageThrottleStage());
rebalancePipeline.addStage(new TaskAssignmentStage());
// round1: set node1 currentState to SLAVE
setCurrentState(clusterName, "localhost_1", resourceName, resourceName + "_0", "session_1", "SLAVE");
runPipeline(event, dataRefresh);
runPipeline(event, rebalancePipeline);
MessageSelectionStageOutput msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
List<Message> messages = msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
Assert.assertEquals(messages.size(), 1, "Should output 1 message: SLAVE-MASTER for node1");
Message message = messages.get(0);
Assert.assertEquals(message.getFromState(), "SLAVE");
Assert.assertEquals(message.getToState(), "MASTER");
Assert.assertEquals(message.getTgtName(), "localhost_1");
// round2: update node0's currentState to SLAVE but keep the pending message;
// the controller should not send another SLAVE->MASTER until the pending message is removed
setupLiveInstances(clusterName, new int[] { 0 });
setCurrentState(clusterName, "localhost_0", resourceName, resourceName + "_0", "session_0", "SLAVE");
runPipeline(event, dataRefresh);
runPipeline(event, rebalancePipeline);
msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0"));
Assert.assertEquals(messages.size(), 0, "Should NOT output 1 message: SLAVE-MASTER for node0");
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
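The setCurrentState helper used by these pipeline tests is not shown in the snippets; below is a plausible sketch of how such a helper could write the CURRENTSTATE znode through ZKHelixDataAccessor. The method name, state model name, and the direct use of _gZkClient are assumptions.
private static void setCurrentStateSketch(String clusterName, String instance, String resourceName, String partition, String sessionId, String state) {
// Sketch only; the real helper in the Helix test code may differ.
ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
CurrentState curState = new CurrentState(resourceName);
curState.setSessionId(sessionId);
curState.setStateModelDefRef("MasterSlave"); // assumed state model
curState.setState(partition, state);
accessor.setProperty(accessor.keyBuilder().currentState(instance, sessionId, resourceName), curState);
}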