use of org.apache.helix.HelixDataAccessor in project helix by apache.
the class ZKPathDataDumpTask method run.
@Override
public void run() {
  // For each record in the status update and error nodes
  // TODO: for now the status updates are dumped to the cluster manager's log4j log.
  // We need to decide whether to create per-instance log files that contain
  // per-instance statusUpdates and errors.
  LOG.info("Scan statusUpdates and errors for cluster: " + _manager.getClusterName() + ", by controller: " + _manager);
  HelixDataAccessor accessor = _manager.getHelixDataAccessor();
  Builder keyBuilder = accessor.keyBuilder();
  BaseDataAccessor<ZNRecord> baseAccessor = accessor.getBaseDataAccessor();
  List<String> instances = accessor.getChildNames(keyBuilder.instanceConfigs());
  for (String instance : instances) {
    // dump participant status updates
    String statusUpdatePath = PropertyPathBuilder.instanceStatusUpdate(_manager.getClusterName(), instance);
    dump(baseAccessor, statusUpdatePath, _thresholdNoChangeMsForStatusUpdates, _maxLeafCount);
    // dump participant errors
    String errorPath = PropertyPathBuilder.instanceError(_manager.getClusterName(), instance);
    dump(baseAccessor, errorPath, _thresholdNoChangeMsForErrors, _maxLeafCount);
  }
  // dump controller status updates
  String controllerStatusUpdatePath = PropertyPathBuilder.controllerStatusUpdate(_manager.getClusterName());
  dump(baseAccessor, controllerStatusUpdatePath, _thresholdNoChangeMsForStatusUpdates, _maxLeafCount);
  // dump controller errors
  String controllerErrorPath = PropertyPathBuilder.controllerError(_manager.getClusterName());
  dump(baseAccessor, controllerErrorPath, _thresholdNoChangeMsForErrors, _maxLeafCount);
}
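As context, a minimal sketch of how such a dump task might be scheduled against a running HelixManager. The ZKPathDataDumpTask(manager, thresholdMs, thresholdMs, maxLeafCount) constructor shape matches the usage in the TestSchedulerMessage example further down; the threshold values, the hourly interval, the executor setup, and the import path of ZKPathDataDumpTask are illustrative assumptions, not Helix's own scheduling code.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.helix.HelixManager;
// Package path assumed; verify against your Helix version.
import org.apache.helix.monitoring.ZKPathDataDumpTask;

public class StatusDumpScheduler {
  // Illustrative thresholds: prune status updates untouched for 1 hour, errors untouched
  // for 24 hours, and keep at most 100 leaf nodes under each scanned path.
  public static ScheduledExecutorService schedule(HelixManager manager) {
    ZKPathDataDumpTask task =
        new ZKPathDataDumpTask(manager, 60L * 60 * 1000, 24L * 60 * 60 * 1000, 100);
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    // Run the cleanup once an hour; the interval is an assumption for this sketch.
    executor.scheduleAtFixedRate(task, 0, 1, TimeUnit.HOURS);
    return executor;
  }
}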
use of org.apache.helix.HelixDataAccessor in project helix by apache.
the class TestStateTransitionTimeout method testStateTransitionTimeOut.
@Test
public void testStateTransitionTimeOut() throws Exception {
  Map<String, SleepStateModelFactory> factories = new HashMap<String, SleepStateModelFactory>();
  IdealState idealState = _setupTool.getClusterManagementTool().getResourceIdealState(CLUSTER_NAME, TEST_DB);
  for (int i = 0; i < NODE_NR; i++) {
    String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
    SleepStateModelFactory factory = new SleepStateModelFactory(1000);
    factories.put(instanceName, factory);
    for (String p : idealState.getPartitionSet()) {
      if (idealState.getPreferenceList(p).get(0).equals(instanceName)) {
        factory.addPartition(p);
      }
    }
    _participants[i] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName);
    _participants[i].getStateMachineEngine().registerStateModelFactory("MasterSlave", factory);
    _participants[i].syncStart();
  }
  String controllerName = CONTROLLER_PREFIX + "_0";
  _controller = new ClusterControllerManager(ZK_ADDR, CLUSTER_NAME, controllerName);
  _controller.syncStart();
  boolean result = ClusterStateVerifier.verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR, CLUSTER_NAME));
  Assert.assertTrue(result);
  HelixDataAccessor accessor = _participants[0].getHelixDataAccessor();
  Builder kb = accessor.keyBuilder();
  ExternalView ev = accessor.getProperty(kb.externalView(TEST_DB));
  for (String p : idealState.getPartitionSet()) {
    String idealMaster = idealState.getPreferenceList(p).get(0);
    Assert.assertTrue(ev.getStateMap(p).get(idealMaster).equals("ERROR"));
    TimeOutStateModel model = factories.get(idealMaster).getStateModel(TEST_DB, p);
    Assert.assertEquals(model._errorCallcount, 1);
    Assert.assertEquals(model._error.getCode(), ErrorCode.TIMEOUT);
  }
}
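SleepStateModelFactory and TimeOutStateModel are helpers defined inside this test class and are not shown in this excerpt. As a rough illustration of the idea, a state model whose OFFLINE to SLAVE transition sleeps past the configured transition timeout, so the controller marks the partition ERROR and the error callback fires, might look like the sketch below. The class name, constructor, and the _errorCallcount/_error bookkeeping are hypothetical stand-ins that only mirror what the assertions above inspect.

import org.apache.helix.NotificationContext;
import org.apache.helix.model.Message;
import org.apache.helix.participant.statemachine.StateModel;
import org.apache.helix.participant.statemachine.StateModelInfo;
import org.apache.helix.participant.statemachine.StateTransitionError;
import org.apache.helix.participant.statemachine.Transition;

// Hypothetical sketch: blocks in the OFFLINE->SLAVE transition long enough for the
// controller's transition timeout to fire.
@StateModelInfo(initialState = "OFFLINE", states = { "MASTER", "SLAVE", "OFFLINE", "ERROR" })
public class SleepingMasterSlaveStateModel extends StateModel {
  private final long _sleepMs;
  public int _errorCallcount = 0;
  public StateTransitionError _error;

  public SleepingMasterSlaveStateModel(long sleepMs) {
    _sleepMs = sleepMs;
  }

  @Transition(to = "SLAVE", from = "OFFLINE")
  public void onBecomeSlaveFromOffline(Message message, NotificationContext context)
      throws InterruptedException {
    // Sleep past the transition timeout configured for the resource.
    Thread.sleep(_sleepMs);
  }

  @Override
  public void rollbackOnError(Message message, NotificationContext context,
      StateTransitionError error) {
    // Record the error so a test can assert on ErrorCode.TIMEOUT, as above.
    _errorCallcount++;
    _error = error;
  }
}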
use of org.apache.helix.HelixDataAccessor in project helix by apache.
the class TestAutoRebalancePartitionLimit method beforeClass.
@Override
@BeforeClass
public void beforeClass() throws Exception {
  // Logger.getRootLogger().setLevel(Level.INFO);
  System.out.println("START " + CLASS_NAME + " at " + new Date(System.currentTimeMillis()));
  String namespace = "/" + CLUSTER_NAME;
  if (_gZkClient.exists(namespace)) {
    _gZkClient.deleteRecursively(namespace);
  }
  _setupTool = new ClusterSetup(ZK_ADDR);
  // setup storage cluster
  _setupTool.addCluster(CLUSTER_NAME, true);
  _setupTool.addResourceToCluster(CLUSTER_NAME, TEST_DB, 100, "OnlineOffline", RebalanceMode.FULL_AUTO + "", 0, 25);
  for (int i = 0; i < NODE_NR; i++) {
    String storageNodeName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
    _setupTool.addInstanceToCluster(CLUSTER_NAME, storageNodeName);
  }
  _setupTool.rebalanceStorageCluster(CLUSTER_NAME, TEST_DB, 1);
  // start controller
  String controllerName = CONTROLLER_PREFIX + "_0";
  _controller = new ClusterControllerManager(ZK_ADDR, CLUSTER_NAME, controllerName);
  _controller.syncStart();
  // _startCMResultMap.get(controllerName)._manager;
  HelixManager manager = _controller;
  HelixDataAccessor accessor = manager.getHelixDataAccessor();
  // start dummy participants
  for (int i = 0; i < NODE_NR; i++) {
    String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
    _participants[i] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName);
    _participants[i].syncStart();
    Thread.sleep(2000);
    boolean result = ClusterStateVerifier.verifyByZkCallback(new ExternalViewBalancedVerifier(_gZkClient, CLUSTER_NAME, TEST_DB));
    Assert.assertTrue(result);
    ExternalView ev = manager.getHelixDataAccessor().getProperty(accessor.keyBuilder().externalView(TEST_DB));
    System.out.println(ev.getPartitionSet().size());
    if (i < 3) {
      Assert.assertEquals(ev.getPartitionSet().size(), 25 * (i + 1));
    } else {
      Assert.assertEquals(ev.getPartitionSet().size(), 100);
    }
  }
  boolean result = ClusterStateVerifier.verifyByZkCallback(new ExternalViewBalancedVerifier(_gZkClient, CLUSTER_NAME, TEST_DB));
  Assert.assertTrue(result);
}
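The assertions in the participant loop follow from the resource definition: 100 partitions, one replica, and (reading the final two addResourceToCluster arguments as bucket size and per-instance partition cap) at most 25 partitions per instance, so after i + 1 participants join the external view should contain min(25 * (i + 1), 100) partitions. A tiny sketch of that arithmetic; the class and constant names are mine and are not part of the test.

// Restates the expected-size arithmetic behind the assertions above.
final class PartitionLimitMath {
  static final int TOTAL_PARTITIONS = 100; // from addResourceToCluster(..., 100, ...)
  static final int MAX_PER_INSTANCE = 25;  // from addResourceToCluster(..., 25)

  // Expected external-view size once `participantsStarted` nodes have joined.
  static int expectedPartitionCount(int participantsStarted) {
    return Math.min(MAX_PER_INSTANCE * participantsStarted, TOTAL_PARTITIONS);
  }
}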
use of org.apache.helix.HelixDataAccessor in project helix by apache.
the class TestAutoRebalancePartitionLimit method testAutoRebalanceWithMaxPartitionPerNode.
@Test()
public void testAutoRebalanceWithMaxPartitionPerNode() throws Exception {
  HelixManager manager = _controller;
  // kill 1 node
  _participants[0].syncStop();
  // verifyBalanceExternalView();
  boolean result = ClusterStateVerifier.verifyByZkCallback(new ExternalViewBalancedVerifier(_gZkClient, CLUSTER_NAME, TEST_DB));
  Assert.assertTrue(result);
  HelixDataAccessor accessor = manager.getHelixDataAccessor();
  ExternalView ev = manager.getHelixDataAccessor().getProperty(accessor.keyBuilder().externalView(TEST_DB));
  Assert.assertEquals(ev.getPartitionSet().size(), 100);
  _participants[1].syncStop();
  // verifyBalanceExternalView();
  result = ClusterStateVerifier.verifyByZkCallback(new ExternalViewBalancedVerifier(_gZkClient, CLUSTER_NAME, TEST_DB));
  Assert.assertTrue(result);
  ev = manager.getHelixDataAccessor().getProperty(accessor.keyBuilder().externalView(TEST_DB));
  Assert.assertEquals(ev.getPartitionSet().size(), 75);
  // add 2 nodes
  for (int i = 0; i < 2; i++) {
    String storageNodeName = PARTICIPANT_PREFIX + "_" + (1000 + i);
    _setupTool.addInstanceToCluster(CLUSTER_NAME, storageNodeName);
    String newInstanceName = storageNodeName.replace(':', '_');
    MockParticipantManager participant = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, newInstanceName);
    participant.syncStart();
  }
  Thread.sleep(1000);
  result = ClusterStateVerifier.verifyByZkCallback(new ExternalViewBalancedVerifier(_gZkClient, CLUSTER_NAME, TEST_DB));
  Assert.assertTrue(result);
}
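The drop from 100 to 75 assigned partitions is the same cap seen from the other side: with two of the five participants stopped, the three survivors can host at most 3 * 25 = 75 partitions in total. A hedged sketch of checking the per-instance cap directly against the ExternalView; the helper name and the hard-coded cap are illustrative, while getPartitionSet() and getStateMap() are the same accessors used in the tests above.

import java.util.HashMap;
import java.util.Map;

import org.apache.helix.model.ExternalView;
import org.testng.Assert;

final class ExternalViewChecks {
  // Hypothetical check: no instance should appear in more partition state maps than the cap.
  static void assertPerInstanceCap(ExternalView ev, int maxPartitionsPerInstance) {
    Map<String, Integer> perInstance = new HashMap<>();
    for (String partition : ev.getPartitionSet()) {
      for (String instance : ev.getStateMap(partition).keySet()) {
        perInstance.merge(instance, 1, Integer::sum);
      }
    }
    for (Map.Entry<String, Integer> e : perInstance.entrySet()) {
      Assert.assertTrue(e.getValue() <= maxPartitionsPerInstance,
          e.getKey() + " hosts " + e.getValue() + " partitions");
    }
  }
}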
use of org.apache.helix.HelixDataAccessor in project helix by apache.
the class TestSchedulerMessage method testSchedulerMsg.
@Test(dependsOnMethods = "testSchedulerZeroMsg")
public void testSchedulerMsg() throws Exception {
  _factory._results.clear();
  HelixManager manager = null;
  for (int i = 0; i < NODE_NR; i++) {
    _participants[i].getMessagingService().registerMessageHandlerFactory(_factory.getMessageTypes(), _factory);
    // _startCMResultMap.get(hostDest)._manager;
    manager = _participants[i];
  }
  Message schedulerMessage = new Message(MessageType.SCHEDULER_MSG + "", UUID.randomUUID().toString());
  schedulerMessage.setTgtSessionId("*");
  schedulerMessage.setTgtName("CONTROLLER");
  // TODO: change it to "ADMIN" ?
  schedulerMessage.setSrcName("CONTROLLER");
  // schedulerMessage.getRecord().setSimpleField(DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE,
  // "TestSchedulerMsg");
  // Template for the individual message sent to each participant
  Message msg = new Message(_factory.getMessageTypes().get(0), "Template");
  msg.setTgtSessionId("*");
  msg.setMsgState(MessageState.NEW);
  // Criteria to send individual messages
  Criteria cr = new Criteria();
  cr.setInstanceName("localhost_%");
  cr.setRecipientInstanceType(InstanceType.PARTICIPANT);
  cr.setSessionSpecific(false);
  cr.setResource("%");
  cr.setPartition("%");
  ObjectMapper mapper = new ObjectMapper();
  SerializationConfig serializationConfig = mapper.getSerializationConfig();
  serializationConfig.set(SerializationConfig.Feature.INDENT_OUTPUT, true);
  StringWriter sw = new StringWriter();
  mapper.writeValue(sw, cr);
  String crString = sw.toString();
  schedulerMessage.getRecord().setSimpleField("Criteria", crString);
  schedulerMessage.getRecord().setMapField("MessageTemplate", msg.getRecord().getSimpleFields());
  schedulerMessage.getRecord().setSimpleField("TIMEOUT", "-1");
  HelixDataAccessor helixDataAccessor = manager.getHelixDataAccessor();
  Builder keyBuilder = helixDataAccessor.keyBuilder();
  helixDataAccessor.createControllerMessage(schedulerMessage);
  for (int i = 0; i < 30; i++) {
    Thread.sleep(2000);
    if (_PARTITIONS == _factory._results.size()) {
      break;
    }
  }
  Assert.assertEquals(_PARTITIONS, _factory._results.size());
  PropertyKey controllerTaskStatus = keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.name(), schedulerMessage.getMsgId());
  int messageResultCount = 0;
  for (int i = 0; i < 10; i++) {
    Thread.sleep(1000);
    ZNRecord statusUpdate = helixDataAccessor.getProperty(controllerTaskStatus).getRecord();
    Assert.assertTrue(statusUpdate.getMapField("SentMessageCount").get("MessageCount").equals("" + (_PARTITIONS * 3)));
    for (String key : statusUpdate.getMapFields().keySet()) {
      if (key.startsWith("MessageResult ")) {
        messageResultCount++;
        Assert.assertTrue(statusUpdate.getMapField(key).size() > 1);
      }
    }
    if (messageResultCount == _PARTITIONS * 3) {
      break;
    } else {
      Thread.sleep(2000);
    }
  }
  Assert.assertEquals(messageResultCount, _PARTITIONS * 3);
  int count = 0;
  for (Set<String> val : _factory._results.values()) {
    count += val.size();
  }
  Assert.assertEquals(count, _PARTITIONS * 3);
  // test the ZkPathDataDumpTask
  String controllerStatusPath = PropertyPathBuilder.controllerStatusUpdate(manager.getClusterName());
  List<String> subPaths = _gZkClient.getChildren(controllerStatusPath);
  Assert.assertTrue(subPaths.size() > 0);
  for (String subPath : subPaths) {
    String nextPath = controllerStatusPath + "/" + subPath;
    List<String> subsubPaths = _gZkClient.getChildren(nextPath);
    Assert.assertTrue(subsubPaths.size() > 0);
  }
  String instanceStatusPath = PropertyPathBuilder.instanceStatusUpdate(manager.getClusterName(), "localhost_" + (START_PORT));
  subPaths = _gZkClient.getChildren(instanceStatusPath);
  Assert.assertTrue(subPaths.size() == 0);
  for (String subPath : subPaths) {
    String nextPath = instanceStatusPath + "/" + subPath;
    List<String> subsubPaths = _gZkClient.getChildren(nextPath);
    Assert.assertTrue(subsubPaths.size() > 0);
    for (String subsubPath : subsubPaths) {
      String nextnextPath = nextPath + "/" + subsubPath;
      Assert.assertTrue(_gZkClient.getChildren(nextnextPath).size() > 0);
    }
  }
  Thread.sleep(3000);
  ZKPathDataDumpTask dumpTask = new ZKPathDataDumpTask(manager, 0L, 0L, Integer.MAX_VALUE);
  dumpTask.run();
  subPaths = _gZkClient.getChildren(controllerStatusPath);
  Assert.assertTrue(subPaths.size() > 0);
  for (String subPath : subPaths) {
    String nextPath = controllerStatusPath + "/" + subPath;
    List<String> subsubPaths = _gZkClient.getChildren(nextPath);
    Assert.assertTrue(subsubPaths.size() == 0);
  }
  subPaths = _gZkClient.getChildren(instanceStatusPath);
  Assert.assertTrue(subPaths.size() == 0);
  for (String subPath : subPaths) {
    String nextPath = instanceStatusPath + "/" + subPath;
    List<String> subsubPaths = _gZkClient.getChildren(nextPath);
    Assert.assertTrue(subsubPaths.size() > 0);
    for (String subsubPath : subsubPaths) {
      String nextnextPath = nextPath + "/" + subsubPath;
      Assert.assertTrue(_gZkClient.getChildren(nextnextPath).size() == 0);
    }
  }
}
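The _factory used here is a test-local handler factory whose definition is not part of this excerpt. The sketch below shows one way such a factory could record what it handled into a _results map so the counts above work out, loosely following the org.apache.helix.messaging.handling.MessageHandlerFactory contract. The class name, the _results layout, and the choice of the single-type getMessageType() variant (the excerpt above calls getMessageTypes(), which newer Helix versions expose) are assumptions to verify against your Helix version.

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;

import org.apache.helix.NotificationContext;
import org.apache.helix.messaging.handling.HelixTaskResult;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.messaging.handling.MessageHandlerFactory;
import org.apache.helix.model.Message;

// Hypothetical stand-in for the test's _factory: records every message it handles,
// keyed by partition, so a test can count handled messages afterwards.
public class RecordingMessageHandlerFactory implements MessageHandlerFactory {
  public final Map<String, Set<String>> _results = new ConcurrentHashMap<>();

  @Override
  public MessageHandler createHandler(Message message, NotificationContext context) {
    return new MessageHandler(message, context) {
      @Override
      public HelixTaskResult handleMessage() {
        // Remember which message ids were handled for this partition.
        _results.computeIfAbsent(_message.getPartitionName(), k -> new ConcurrentSkipListSet<>())
            .add(_message.getMsgId());
        HelixTaskResult result = new HelixTaskResult();
        result.setSuccess(true);
        return result;
      }

      @Override
      public void onError(Exception e, ErrorCode code, ErrorType type) {
        // No-op for this sketch.
      }
    };
  }

  @Override
  public String getMessageType() {
    return "TestMessageType"; // hypothetical custom message type
  }

  @Override
  public void reset() {
  }
}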