Use of org.apache.helix.model.LiveInstance in project helix by apache.
The class TestResetPartitionState, method clearStatusUpdate:
private void clearStatusUpdate(String clusterName, String instance, String resource, String partition) {
  // clear status update for error partition so verify() will not fail on old errors
  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
  Builder keyBuilder = accessor.keyBuilder();
  LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instance));
  accessor.removeProperty(
      keyBuilder.stateTransitionStatus(instance, liveInstance.getSessionId(), resource, partition));
}
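The cleanup above assumes the instance has a live session: keyBuilder.stateTransitionStatus(...) is scoped by the session id read from the LiveInstance znode, and liveInstance.getSessionId() would throw if the instance is offline. A minimal defensive variant, sketched under the assumption that the same accessor is available (the helper name is illustrative, not part of the test):

private void clearStatusUpdateIfLive(ZKHelixDataAccessor accessor, String instance, String resource, String partition) {
  Builder keyBuilder = accessor.keyBuilder();
  LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instance));
  if (liveInstance == null) {
    // No live session for this instance, so there is no session-scoped status update to remove.
    return;
  }
  accessor.removeProperty(
      keyBuilder.stateTransitionStatus(instance, liveInstance.getSessionId(), resource, partition));
}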
Use of org.apache.helix.model.LiveInstance in project helix by apache.
The class ControllerResource, method getControllerRepresentation:
StringRepresentation getControllerRepresentation(String clusterName)
    throws JsonGenerationException, JsonMappingException, IOException {
  Builder keyBuilder = new PropertyKey.Builder(clusterName);
  ZkClient zkClient = ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
  ZNRecord record = null;
  LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader());
  if (leader != null) {
    record = leader.getRecord();
  } else {
    // No controller leader is currently elected; return a placeholder record that says so.
    record = new ZNRecord("");
    DateFormat formatter = new SimpleDateFormat("yyyyMMdd-HHmmss.SSSSSS");
    String time = formatter.format(new Date());
    Map<String, String> contentMap = new TreeMap<String, String>();
    contentMap.put("AdditionalInfo", "No leader exists");
    record.setMapField(Level.HELIX_INFO + "-" + time, contentMap);
  }
  // The cluster is paused exactly when the pause znode exists.
  boolean paused = (accessor.getProperty(keyBuilder.pause()) != null);
  record.setSimpleField(PropertyType.PAUSE.toString(), "" + paused);
  String retVal = ClusterRepresentationUtil.ZNRecordToJson(record);
  StringRepresentation representation = new StringRepresentation(retVal, MediaType.APPLICATION_JSON);
  return representation;
}
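The paused flag reported by this resource simply mirrors whether the cluster's pause znode exists. A standalone sketch of the same check, assuming an existing ZKHelixDataAccessor (the method name here is illustrative):

// Returns true when the cluster's pause znode exists, i.e. the controller pipeline is paused.
static boolean isClusterPaused(ZKHelixDataAccessor accessor) {
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  return accessor.getProperty(keyBuilder.pause()) != null;
}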
Use of org.apache.helix.model.LiveInstance in project helix by apache.
The class SchedulerTasksResource, method getSchedulerTasksRepresentation:
StringRepresentation getSchedulerTasksRepresentation()
    throws JsonGenerationException, JsonMappingException, IOException {
  String clusterName = (String) getRequest().getAttributes().get("clusterName");
  String instanceName = (String) getRequest().getAttributes().get("instanceName");
  ZkClient zkClient = (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
  ClusterSetup setupTool = new ClusterSetup(zkClient);
  List<String> instances = setupTool.getClusterManagementTool().getInstancesInCluster(clusterName);
  HelixDataAccessor accessor = ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
  LiveInstance liveInstance = accessor.getProperty(accessor.keyBuilder().liveInstance(instanceName));
  String sessionId = liveInstance.getSessionId();
  // (ClusterRepresentationUtil.ObjectToJson(instanceConfigs),
  StringRepresentation representation = new StringRepresentation("");
  return representation;
}
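Note that liveInstance.getSessionId() throws a NullPointerException when the requested instance has no live session. A small defensive sketch of the same lookup, assuming the same accessor, clusterName, and instanceName as above (the error message is illustrative):

LiveInstance liveInstance = accessor.getProperty(accessor.keyBuilder().liveInstance(instanceName));
if (liveInstance == null) {
  // Surface a clear error instead of an NPE when the instance is offline.
  throw new IllegalStateException("Instance " + instanceName + " is not live in cluster " + clusterName);
}
String sessionId = liveInstance.getSessionId();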
Use of org.apache.helix.model.LiveInstance in project helix by apache.
The class InstancesResource, method getInstancesRepresentation:
StringRepresentation getInstancesRepresentation(String clusterName)
    throws JsonGenerationException, JsonMappingException, IOException {
  ZkClient zkClient = (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
  HelixDataAccessor accessor = ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
  Map<String, LiveInstance> liveInstancesMap =
      accessor.getChildValuesMap(accessor.keyBuilder().liveInstances());
  Map<String, InstanceConfig> instanceConfigsMap =
      accessor.getChildValuesMap(accessor.keyBuilder().instanceConfigs());
  // Mark each configured instance as alive or not, and group instances by tag.
  Map<String, List<String>> tagInstanceLists = new TreeMap<String, List<String>>();
  for (String instanceName : instanceConfigsMap.keySet()) {
    boolean isAlive = liveInstancesMap.containsKey(instanceName);
    instanceConfigsMap.get(instanceName).getRecord().setSimpleField("Alive", isAlive + "");
    InstanceConfig config = instanceConfigsMap.get(instanceName);
    for (String tag : config.getTags()) {
      if (!tagInstanceLists.containsKey(tag)) {
        tagInstanceLists.put(tag, new LinkedList<String>());
      }
      if (!tagInstanceLists.get(tag).contains(instanceName)) {
        tagInstanceLists.get(tag).add(instanceName);
      }
    }
  }
  // Wrap raw data into an object, then serialize it
  List<ZNRecord> recordList = Lists.newArrayList();
  for (InstanceConfig instanceConfig : instanceConfigsMap.values()) {
    recordList.add(instanceConfig.getRecord());
  }
  ListInstancesWrapper wrapper = new ListInstancesWrapper();
  wrapper.instanceInfo = recordList;
  wrapper.tagInfo = tagInstanceLists;
  StringRepresentation representation = new StringRepresentation(
      ClusterRepresentationUtil.ObjectToJson(wrapper), MediaType.APPLICATION_JSON);
  return representation;
}
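Comparing the liveInstances and instanceConfigs child maps, as this method does for its "Alive" field, is also a straightforward way to find instances that are configured but currently offline. A minimal sketch under the same assumptions (an existing HelixDataAccessor; the helper name is illustrative):

// Configured instances that have no live-instance znode, i.e. currently offline.
static List<String> getOfflineInstances(HelixDataAccessor accessor) {
  Map<String, LiveInstance> live = accessor.getChildValuesMap(accessor.keyBuilder().liveInstances());
  Map<String, InstanceConfig> configured = accessor.getChildValuesMap(accessor.keyBuilder().instanceConfigs());
  List<String> offline = new LinkedList<String>();
  for (String instanceName : configured.keySet()) {
    if (!live.containsKey(instanceName)) {
      offline.add(instanceName);
    }
  }
  return offline;
}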
Use of org.apache.helix.model.LiveInstance in project helix by apache.
The class TestHelixAdminCli, method testStartCluster:
@Test
public void testStartCluster() throws Exception {
  String className = TestHelper.getTestClassName();
  String methodName = TestHelper.getTestMethodName();
  String clusterName = className + "_" + methodName;
  String grandClusterName = clusterName + "_grand";
  final int n = 6;
  System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
  MockParticipantManager[] participants = new MockParticipantManager[n];
  ClusterDistributedController[] controllers = new ClusterDistributedController[2];
  setupCluster(clusterName, grandClusterName, n, participants, controllers);
  // activate clusters
  // wrong grand cluster name
  String command = "-zkSvr localhost:2183 -activateCluster " + clusterName + " nonExistGrandCluster true";
  try {
    ClusterSetup.processCommandLineArgs(command.split("\\s+"));
    Assert.fail("add " + clusterName + " to grandCluster should fail since grandCluster doesn't exist");
  } catch (Exception e) {
    // OK
  }
  // wrong cluster name
  command = "-zkSvr localhost:2183 -activateCluster nonExistCluster " + grandClusterName + " true";
  try {
    ClusterSetup.processCommandLineArgs(command.split("\\s+"));
    Assert.fail("add nonExistCluster to " + grandClusterName + " should fail since nonExistCluster doesn't exist");
  } catch (Exception e) {
    // OK
  }
  command = "-zkSvr localhost:2183 -activateCluster " + clusterName + " " + grandClusterName + " true";
  ClusterSetup.processCommandLineArgs(command.split("\\s+"));
  Thread.sleep(500);
  // drop a running cluster
  command = "-zkSvr localhost:2183 -dropCluster " + clusterName;
  try {
    ClusterSetup.processCommandLineArgs(command.split("\\s+"));
    Assert.fail("drop " + clusterName + " should fail since it's still running");
  } catch (Exception e) {
    // OK
  }
  // verify leader node
  BaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
  HelixDataAccessor accessor = new ZKHelixDataAccessor(grandClusterName, baseAccessor);
  LiveInstance controllerLeader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
  Assert.assertNotNull(controllerLeader, "controllerLeader should be either controller_9000 or controller_9001");
  Assert.assertTrue(controllerLeader.getInstanceName().startsWith("controller_900"));
  accessor = new ZKHelixDataAccessor(clusterName, baseAccessor);
  LiveInstance leader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
  // Wait up to ~4 seconds for a leader to be elected in the managed cluster.
  for (int i = 0; i < 20; i++) {
    if (leader != null) {
      break;
    }
    Thread.sleep(200);
    leader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
  }
  Assert.assertNotNull(leader, "a leader should have been elected for " + clusterName);
  Assert.assertTrue(leader.getInstanceName().startsWith("controller_900"));
  boolean verifyResult =
      ClusterStateVerifier.verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR, clusterName));
  Assert.assertTrue(verifyResult);
  verifyResult =
      ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName));
  Assert.assertTrue(verifyResult);
  // clean up
  for (ClusterDistributedController controller : controllers) {
    controller.syncStop();
  }
  for (int i = 0; i < participants.length; i++) {
    participants[i].syncStop();
  }
  System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
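Once the controllers and participants have been stopped, the -dropCluster command that failed earlier (while the cluster was still running) should go through. A short sketch of that final teardown step, assuming the same ZK server address used in the test and placed at the end of the test body:

// With no live controllers or participants left, dropping the cluster should now succeed.
String dropCommand = "-zkSvr localhost:2183 -dropCluster " + clusterName;
ClusterSetup.processCommandLineArgs(dropCommand.split("\\s+"));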