Use of org.apache.helix.HelixManager in project helix by apache.
The class TestSchedulerMsgUsingQueue, method testSchedulerMsgUsingQueue.
@Test()
public void testSchedulerMsgUsingQueue() throws Exception {
  _factory._results.clear();
  Thread.sleep(2000);
  HelixManager manager = null;
  for (int i = 0; i < NODE_NR; i++) {
    _participants[i].getMessagingService()
        .registerMessageHandlerFactory(_factory.getMessageType(), _factory);
    manager = _participants[i];
  }
  Message schedulerMessage =
      new Message(MessageType.SCHEDULER_MSG + "", UUID.randomUUID().toString());
  schedulerMessage.setTgtSessionId("*");
  schedulerMessage.setTgtName("CONTROLLER");
  // TODO: change it to "ADMIN"?
  schedulerMessage.setSrcName("CONTROLLER");
  schedulerMessage.getRecord()
      .setSimpleField(DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE, "TestSchedulerMsg");
  // Template for the individual message sent to each participant
  Message msg = new Message(_factory.getMessageType(), "Template");
  msg.setTgtSessionId("*");
  msg.setMsgState(MessageState.NEW);
  // Criteria to send individual messages
  Criteria cr = new Criteria();
  cr.setInstanceName("localhost_%");
  cr.setRecipientInstanceType(InstanceType.PARTICIPANT);
  cr.setSessionSpecific(false);
  cr.setResource("%");
  cr.setPartition("%");
  // Serialize the criteria as JSON and embed it in the scheduler message
  ObjectMapper mapper = new ObjectMapper();
  SerializationConfig serializationConfig = mapper.getSerializationConfig();
  serializationConfig.set(SerializationConfig.Feature.INDENT_OUTPUT, true);
  StringWriter sw = new StringWriter();
  mapper.writeValue(sw, cr);
  String crString = sw.toString();
  schedulerMessage.getRecord().setSimpleField("Criteria", crString);
  schedulerMessage.getRecord().setMapField("MessageTemplate", msg.getRecord().getSimpleFields());
  schedulerMessage.getRecord().setSimpleField("TIMEOUT", "-1");
  HelixDataAccessor helixDataAccessor = manager.getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = helixDataAccessor.keyBuilder();
  helixDataAccessor.createControllerMessage(schedulerMessage);
  // Poll for up to 60 seconds until every partition has reported a result
  for (int i = 0; i < 30; i++) {
    Thread.sleep(2000);
    if (_PARTITIONS == _factory._results.size()) {
      break;
    }
  }
  Assert.assertEquals(_PARTITIONS, _factory._results.size());
  PropertyKey controllerTaskStatus =
      keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.name(), schedulerMessage.getMsgId());
  int messageResultCount = 0;
  for (int i = 0; i < 10; i++) {
    ZNRecord statusUpdate = helixDataAccessor.getProperty(controllerTaskStatus).getRecord();
    Assert.assertTrue(statusUpdate.getMapField("SentMessageCount").get("MessageCount")
        .equals("" + (_PARTITIONS * 3)));
    // Reset before each scan so a retry doesn't double-count results from the previous read
    messageResultCount = 0;
    for (String key : statusUpdate.getMapFields().keySet()) {
      if (key.startsWith("MessageResult ")) {
        messageResultCount++;
      }
    }
    if (messageResultCount == _PARTITIONS * 3) {
      break;
    } else {
      Thread.sleep(2000);
    }
  }
  Assert.assertEquals(messageResultCount, _PARTITIONS * 3);
  int count = 0;
  for (Set<String> val : _factory._results.values()) {
    count += val.size();
  }
  Assert.assertEquals(count, _PARTITIONS * 3);
}
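The _factory above is supplied by the test's base class; its _results map collects one entry set per partition. As a point of reference, a minimal factory satisfying the same MessageHandlerFactory contract could look like the following sketch. The class name and the message type "TestMsg" are hypothetical; this illustrates the contract rather than reproducing the factory the test actually uses.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.helix.NotificationContext;
import org.apache.helix.messaging.handling.HelixTaskResult;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.messaging.handling.MessageHandlerFactory;
import org.apache.helix.model.Message;

// Sketch only: a factory that records which messages each partition received.
public class SketchTestMessageFactory implements MessageHandlerFactory {
  // Partition name -> ids of messages handled for that partition.
  public final ConcurrentHashMap<String, Set<String>> _results = new ConcurrentHashMap<>();

  @Override
  public MessageHandler createHandler(Message message, NotificationContext context) {
    return new MessageHandler(message, context) {
      @Override
      public HelixTaskResult handleMessage() {
        _results.computeIfAbsent(_message.getPartitionName(),
            k -> ConcurrentHashMap.newKeySet()).add(_message.getMsgId());
        HelixTaskResult result = new HelixTaskResult();
        result.setSuccess(true);
        return result;
      }

      @Override
      public void onError(Exception e, ErrorCode code, ErrorType type) {
        // Sketch: a real handler would log or record the failure here.
      }
    };
  }

  @Override
  public String getMessageType() {
    return "TestMsg"; // hypothetical type; the real test uses its own constant
  }

  @Override
  public void reset() {
  }
}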
Use of org.apache.helix.HelixManager in project helix by apache.
The class TestInstanceAutoJoin, method testInstanceAutoJoin.
@Test
public void testInstanceAutoJoin() throws Exception {
  HelixManager manager = _participants[0];
  HelixDataAccessor accessor = manager.getHelixDataAccessor();
  _setupTool.addResourceToCluster(CLUSTER_NAME, db2, 60, "OnlineOffline",
      RebalanceMode.FULL_AUTO + "");
  _setupTool.rebalanceStorageCluster(CLUSTER_NAME, db2, 1);
  String instance2 = "localhost_279699";
  MockParticipantManager newParticipant =
      new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instance2);
  newParticipant.syncStart();
  Thread.sleep(500);
  // Auto-join is off by default, so the unprovisioned instance must not go live
  Assert.assertNull(manager.getHelixDataAccessor()
      .getProperty(accessor.keyBuilder().liveInstance(instance2)));
  ConfigScope scope = new ConfigScopeBuilder().forCluster(CLUSTER_NAME).build();
  manager.getConfigAccessor().set(scope, ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, "true");
  newParticipant = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instance2);
  newParticipant.syncStart();
  Thread.sleep(500);
  // With auto-join enabled, wait up to 2 seconds for the instance to appear as live
  for (int i = 0; i < 20; i++) {
    if (null == manager.getHelixDataAccessor()
        .getProperty(accessor.keyBuilder().liveInstance(instance2))) {
      Thread.sleep(100);
    } else {
      break;
    }
  }
  Assert.assertNotNull(manager.getHelixDataAccessor()
      .getProperty(accessor.keyBuilder().liveInstance(instance2)));
  newParticipant.syncStop();
}
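ConfigScope and ConfigScopeBuilder are Helix's older configuration API. For comparison, a sketch of the same auto-join toggle written against the newer HelixConfigScope API, assuming a connected HelixManager as in the test above:

import org.apache.helix.HelixManager;
import org.apache.helix.manager.zk.ZKHelixManager;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;

// Sketch: enable participant auto-join at cluster scope.
static void enableAutoJoin(HelixManager manager, String clusterName) {
  HelixConfigScope scope =
      new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.CLUSTER)
          .forCluster(clusterName).build();
  manager.getConfigAccessor().set(scope, ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, "true");
}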
Use of org.apache.helix.HelixManager in project helix by apache.
The class TestInstanceHistory, method testInstanceHistory.
@Test()
public void testInstanceHistory() throws Exception {
  HelixManager manager = HelixManagerFactory.getZKHelixManager(CLUSTER_NAME, "admin",
      InstanceType.ADMINISTRATOR, ZK_ADDR);
  manager.connect();
  PropertyKey.Builder keyBuilder = new PropertyKey.Builder(CLUSTER_NAME);
  PropertyKey propertyKey = keyBuilder.participantHistory(_participants[0].getInstanceName());
  ParticipantHistory history = manager.getHelixDataAccessor().getProperty(propertyKey);
  Assert.assertNotNull(history);
  List<String> list = history.getRecord().getListField("HISTORY");
  Assert.assertEquals(list.size(), 1);
  // Cycle the participant 23 times; the history lists are capped, so only the latest 20 survive
  for (int i = 0; i <= 22; i++) {
    _participants[0].disconnect();
    _participants[0].connect();
  }
  history = manager.getHelixDataAccessor().getProperty(propertyKey);
  Assert.assertNotNull(history);
  list = history.getRecord().getListField("HISTORY");
  Assert.assertEquals(list.size(), 20);
  list = history.getRecord().getListField("OFFLINE");
  Assert.assertEquals(list.size(), 20);
  manager.disconnect();
}
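The test connects an ADMINISTRATOR manager and disconnects it at the end. Outside a test, the read is safer inside try/finally so the ZooKeeper session is always released. A minimal sketch of that pattern, assuming clusterName and zkAddr are supplied by the caller and using a placeholder instance name:

HelixManager admin = HelixManagerFactory.getZKHelixManager(clusterName, "admin",
    InstanceType.ADMINISTRATOR, zkAddr);
try {
  admin.connect();
  PropertyKey.Builder keyBuilder = new PropertyKey.Builder(clusterName);
  // "localhost_12918" is a placeholder instance name, not one from the test cluster.
  ParticipantHistory history = admin.getHelixDataAccessor()
      .getProperty(keyBuilder.participantHistory("localhost_12918"));
  if (history != null) {
    // The HISTORY and OFFLINE list fields are capped at 20 entries, as the test asserts.
    System.out.println(history.getRecord().getListField("HISTORY"));
  }
} finally {
  if (admin.isConnected()) {
    admin.disconnect();
  }
}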
Use of org.apache.helix.HelixManager in project ambry by linkedin.
The class HelixClusterManager, method initializeHelixManagerAndPropertyStoreInLocalDC.
/**
 * Initialize the HelixManager in the local datacenter and complete subscription of HelixPropertyStore to listen
 * for the PartitionOverride zNode. This needs to happen before other datacenters are initialized so that any
 * partition overrides can be properly honored.
 * @param dataCenterToZkAddress the map mapping each datacenter to its corresponding ZkAddress.
 * @param instanceName the String representation of the instance associated with this manager.
 * @param helixFactory the factory class to construct and get a reference to a {@link HelixManager}.
 * @return the HelixManager of the local datacenter, or {@code null} if the local datacenter is
 *         {@link ReplicaType#CLOUD_BACKED}, as we currently do not support getting cluster state from Helix for
 *         cloud datacenters.
 * @throws Exception
 */
private HelixManager initializeHelixManagerAndPropertyStoreInLocalDC(
    Map<String, DcZkInfo> dataCenterToZkAddress, String instanceName, HelixFactory helixFactory)
    throws Exception {
  DcZkInfo dcZkInfo = dataCenterToZkAddress.get(clusterMapConfig.clusterMapDatacenterName);
  if (dcZkInfo.getReplicaType() == ReplicaType.CLOUD_BACKED) {
    return null;
  }
  // For now, the first ZK endpoint (if there is more than one) is adopted by default. Note that Ambry
  // doesn't support multiple HelixClusterManagers (spectators) on the same node.
  String zkConnectStr = dcZkInfo.getZkConnectStrs().get(0);
  HelixManager manager =
      helixFactory.getZkHelixManagerAndConnect(clusterName, instanceName, InstanceType.SPECTATOR, zkConnectStr);
  helixPropertyStoreInLocalDc = manager.getHelixPropertyStore();
  logger.info("HelixPropertyStore from local datacenter {} is: {}", dcZkInfo.getDcName(),
      helixPropertyStoreInLocalDc);
  IZkDataListener dataListener = new IZkDataListener() {
    @Override
    public void handleDataChange(String dataPath, Object data) {
      logger.info("Received data change notification for: {}", dataPath);
    }

    @Override
    public void handleDataDeleted(String dataPath) {
      logger.info("Received data delete notification for: {}", dataPath);
    }
  };
  logger.info("Subscribing data listener to HelixPropertyStore.");
  helixPropertyStoreInLocalDc.subscribeDataChanges(PARTITION_OVERRIDE_ZNODE_PATH, dataListener);
  logger.info("Getting PartitionOverride ZNRecord from HelixPropertyStore");
  ZNRecord zNRecord =
      helixPropertyStoreInLocalDc.get(PARTITION_OVERRIDE_ZNODE_PATH, null, AccessOption.PERSISTENT);
  if (clusterMapConfig.clusterMapEnablePartitionOverride) {
    if (zNRecord != null) {
      partitionOverrideInfoMap.putAll(zNRecord.getMapFields());
      logger.info("partitionOverrideInfoMap is initialized!");
    } else {
      logger.warn("ZNRecord from HelixPropertyStore is NULL; the partitionOverrideInfoMap is empty.");
    }
  }
  return manager;
}
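The listener above only logs change notifications. A variant that also re-reads the override record on change might look like the following sketch. This is a hypothetical extension, not Ambry's actual handler, and it assumes the same fields (helixPropertyStoreInLocalDc, partitionOverrideInfoMap, logger) are in scope:

IZkDataListener refreshingListener = new IZkDataListener() {
  @Override
  public void handleDataChange(String dataPath, Object data) {
    // Hypothetical: reload the override map whenever the znode changes.
    ZNRecord record =
        helixPropertyStoreInLocalDc.get(PARTITION_OVERRIDE_ZNODE_PATH, null, AccessOption.PERSISTENT);
    if (record != null) {
      partitionOverrideInfoMap.putAll(record.getMapFields());
    }
  }

  @Override
  public void handleDataDeleted(String dataPath) {
    logger.warn("PartitionOverride znode {} was deleted; keeping the last known overrides.", dataPath);
  }
};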
Use of org.apache.helix.HelixManager in project ambry by linkedin.
The class HelixVcrClusterSpectator, method spectate.
@Override
public void spectate() throws Exception {
  HelixFactory helixFactory = new HelixFactory();
  String selfInstanceName =
      ClusterMapUtils.getInstanceName(clusterMapConfig.clusterMapHostName, clusterMapConfig.clusterMapPort);
  // Should we fail here if even one of the remote ZK connections fails? With just one datacenter this is
  // not a problem. With two datacenters it is not clear whether startup should pass with one remote ZK
  // connection failure, because if the remote ZK connection fails in both datacenters, things like
  // replication between datacenters might simply stop.
  // For now, since we have only one fabric in the cloud and the spectator is used only for cloud-to-store
  // replication, this works. Once we add more fabrics, we should revisit this.
  HelixManager helixManager = helixFactory.getZkHelixManagerAndConnect(cloudConfig.vcrClusterName,
      selfInstanceName, InstanceType.SPECTATOR, cloudConfig.vcrClusterZkConnectString);
  helixManager.addInstanceConfigChangeListener(this);
  helixManager.addLiveInstanceChangeListener(this);
}
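Because spectate() registers this object as both an instance-config and a live-instance listener, the class must implement the two corresponding Helix callbacks. A minimal sketch of that surface with placeholder bodies rather than Ambry's real logic (the interfaces are shown from org.apache.helix.api.listeners; older Helix versions expose them directly under org.apache.helix):

import java.util.List;
import org.apache.helix.NotificationContext;
import org.apache.helix.api.listeners.InstanceConfigChangeListener;
import org.apache.helix.api.listeners.LiveInstanceChangeListener;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;

public class SketchSpectator implements InstanceConfigChangeListener, LiveInstanceChangeListener {
  @Override
  public void onInstanceConfigChange(List<InstanceConfig> instanceConfigs, NotificationContext context) {
    // Placeholder: react to instance config changes, e.g. propagate them to local listeners.
  }

  @Override
  public void onLiveInstanceChange(List<LiveInstance> liveInstances, NotificationContext changeContext) {
    // Placeholder: react to instances joining or leaving the cluster.
  }
}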