Use of org.apache.helix.manager.zk.ZKHelixAdmin in project Helix by Apache.
The class TestStateModelLeak, method testDrop.
/**
* Test that dropping a resource removes all of its state models.
* @throws Exception
*/
@Test
public void testDrop() throws Exception {
// Logger.getRootLogger().setLevel(Level.INFO);
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
int n = 2;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
TestHelper.setupCluster(clusterName, ZK_ADDR,
12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
4, // partitions per resource
n, // number of nodes
2, // replicas
"MasterSlave",
true); // do rebalance
// start controller
ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName, "controller");
controller.syncStart();
MockParticipantManager[] participants = new MockParticipantManager[n];
for (int i = 0; i < n; i++) {
final String instanceName = "localhost_" + (12918 + i);
participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participants[i].syncStart();
}
boolean result = ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName));
Assert.assertTrue(result);
// check state-models in state-machine
HelixStateMachineEngine stateMachine = (HelixStateMachineEngine) participants[0].getStateMachineEngine();
StateModelFactory<? extends StateModel> fty = stateMachine.getStateModelFactory("MasterSlave");
Map<String, String> expectStateModelMap = new TreeMap<String, String>();
expectStateModelMap.put("TestDB0_0", "SLAVE");
expectStateModelMap.put("TestDB0_1", "MASTER");
expectStateModelMap.put("TestDB0_2", "SLAVE");
expectStateModelMap.put("TestDB0_3", "MASTER");
checkStateModelMap(fty, expectStateModelMap);
// drop resource
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.dropResource(clusterName, "TestDB0");
result = ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName));
Assert.assertTrue(result);
// check state models have been dropped also
Assert.assertTrue(fty.getPartitionSet("TestDB0").isEmpty(), "All state-models should be dropped, but was " + fty.getPartitionSet("TestDB0"));
// cleanup
controller.syncStop();
for (int i = 0; i < n; i++) {
participants[i].syncStop();
}
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
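The checkStateModelMap helper called above is defined elsewhere in TestStateModelLeak and is not shown on this page. A minimal sketch of what such a check might look like, assuming only the StateModelFactory accessors already used above (getPartitionSet and getStateModel) plus StateModel.getCurrentState(), is:
// Hypothetical sketch of the helper: assert that the factory holds exactly the
// expected partitions and that each state model is in the expected state.
static void checkStateModelMap(StateModelFactory<? extends StateModel> fty, Map<String, String> expectStateModelMap) {
    Assert.assertEquals(fty.getPartitionSet("TestDB0").size(), expectStateModelMap.size());
    for (String partition : expectStateModelMap.keySet()) {
        StateModel stateModel = fty.getStateModel("TestDB0", partition);
        Assert.assertEquals(stateModel.getCurrentState(), expectStateModelMap.get(partition));
    }
}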
Use of org.apache.helix.manager.zk.ZKHelixAdmin in project Helix by Apache.
The class TestStateModelLeak, method testDropErrorPartition.
/**
* Test that dropping a resource with a partition in ERROR state removes all state-models.
* @throws Exception
*/
@Test
public void testDropErrorPartition() throws Exception {
// Logger.getRootLogger().setLevel(Level.INFO);
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
int n = 2;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
TestHelper.setupCluster(clusterName, ZK_ADDR,
12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
4, // partitions per resource
n, // number of nodes
2, // replicas
"MasterSlave",
true); // do rebalance
// start controller
ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName, "controller");
controller.syncStart();
MockParticipantManager[] participants = new MockParticipantManager[n];
for (int i = 0; i < n; i++) {
final String instanceName = "localhost_" + (12918 + i);
participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
if (i == 0) {
Map<String, Set<String>> errTransitionMap = new HashMap<String, Set<String>>();
Set<String> partitions = new HashSet<String>();
partitions.add("TestDB0_0");
errTransitionMap.put("OFFLINE-SLAVE", partitions);
participants[0].setTransition(new ErrTransition(errTransitionMap));
}
participants[i].syncStart();
}
Map<String, Map<String, String>> errStates = new HashMap<String, Map<String, String>>();
errStates.put("TestDB0", new HashMap<String, String>());
errStates.get("TestDB0").put("TestDB0_0", "localhost_12918");
boolean result = ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName, errStates));
Assert.assertTrue(result);
// check state-models in state-machine
HelixStateMachineEngine stateMachine = (HelixStateMachineEngine) participants[0].getStateMachineEngine();
StateModelFactory<? extends StateModel> fty = stateMachine.getStateModelFactory("MasterSlave");
Map<String, String> expectStateModelMap = new TreeMap<String, String>();
expectStateModelMap.put("TestDB0_0", "ERROR");
expectStateModelMap.put("TestDB0_1", "MASTER");
expectStateModelMap.put("TestDB0_2", "SLAVE");
expectStateModelMap.put("TestDB0_3", "MASTER");
checkStateModelMap(fty, expectStateModelMap);
// drop resource
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.dropResource(clusterName, "TestDB0");
result = ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName));
Assert.assertTrue(result);
// check state models have been dropped also
Assert.assertTrue(fty.getPartitionSet("TestDB0").isEmpty(), "All state-models should be dropped, but was " + fty.getPartitionSet("TestDB0"));
// cleanup
controller.syncStop();
for (int i = 0; i < n; i++) {
participants[i].syncStop();
}
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
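As an aside, dropping the resource is not the only way to clear the ERROR partition exercised above. A hedged sketch of the alternative, using the standard HelixAdmin resetPartition call with the instance and partition names from this test (purely illustrative, not part of the test):
// Illustrative alternative: reset the ERROR partition on the failing instance
// instead of dropping the whole resource.
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.resetPartition(clusterName, "localhost_12918", "TestDB0", Arrays.asList("TestDB0_0"));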
Use of org.apache.helix.manager.zk.ZKHelixAdmin in project Helix by Apache.
The class TestZkHelixAdmin, method testEnableDisablePartitions.
@Test
public void testEnableDisablePartitions() throws InterruptedException {
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.enablePartition(false, CLUSTER_NAME, (PARTICIPANT_PREFIX + "_" + _startPort), WorkflowGenerator.DEFAULT_TGT_DB, Arrays.asList(new String[] { "TestDB_0", "TestDB_2" }));
IdealState idealState = admin.getResourceIdealState(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB);
List<String> preferenceList = Arrays.asList(new String[] { "localhost_12919", "localhost_12918" });
for (String partitionName : idealState.getPartitionSet()) {
idealState.setPreferenceList(partitionName, preferenceList);
}
idealState.setRebalanceMode(IdealState.RebalanceMode.SEMI_AUTO);
admin.setResourceIdealState(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB, idealState);
String workflowName = TestHelper.getTestMethodName();
Workflow.Builder builder = new Workflow.Builder(workflowName);
JobConfig.Builder jobBuilder = new JobConfig.Builder().setWorkflow(workflowName).setCommand(MockTask.TASK_COMMAND).setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB).setTargetPartitionStates(Collections.singleton("SLAVE"));
builder.addJob("JOB", jobBuilder);
_driver.start(builder.build());
Thread.sleep(2000L);
JobContext jobContext = _driver.getJobContext(TaskUtil.getNamespacedJobName(workflowName, "JOB"));
Assert.assertEquals(jobContext.getPartitionState(0), null);
Assert.assertEquals(jobContext.getPartitionState(1), TaskPartitionState.COMPLETED);
Assert.assertEquals(jobContext.getPartitionState(2), null);
}
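The same enablePartition call with its first argument set to true re-enables the partitions; a minimal follow-up sketch reusing the names from the test above:
// Re-enable the two partitions that were disabled at the start of the test.
admin.enablePartition(true, CLUSTER_NAME, (PARTICIPANT_PREFIX + "_" + _startPort), WorkflowGenerator.DEFAULT_TGT_DB, Arrays.asList("TestDB_0", "TestDB_2"));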
Use of org.apache.helix.manager.zk.ZKHelixAdmin in project Helix by Apache.
The class YAMLClusterSetup, method setupCluster.
/**
* Set up the cluster by parsing a YAML file.
* @param input InputStream representing the file
* @return YAMLClusterConfig Java wrapper of the configuration file
*/
public YAMLClusterConfig setupCluster(InputStream input) {
// parse the YAML
Yaml yaml = new Yaml();
YAMLClusterConfig cfg = yaml.loadAs(input, YAMLClusterConfig.class);
// create the cluster
HelixAdmin helixAdmin = new ZKHelixAdmin(_zkAddress);
if (cfg.clusterName == null) {
throw new HelixException("Cluster name is required!");
}
helixAdmin.addCluster(cfg.clusterName);
// add each participant
if (cfg.participants != null) {
for (ParticipantConfig participant : cfg.participants) {
helixAdmin.addInstance(cfg.clusterName, getInstanceCfg(participant));
}
}
// add each resource
if (cfg.resources != null) {
for (ResourceConfig resource : cfg.resources) {
if (resource.name == null) {
throw new HelixException("Resources must be named!");
}
if (resource.stateModel == null || resource.stateModel.name == null) {
throw new HelixException("Resource must specify a named state model!");
}
// if states is null, assume using a built-in or already-added state model
if (resource.stateModel.states != null) {
StateModelDefinition stateModelDef = getStateModelDef(resource.stateModel, resource.constraints);
helixAdmin.addStateModelDef(cfg.clusterName, resource.stateModel.name, stateModelDef);
} else {
StateModelDefinition stateModelDef = null;
if (resource.stateModel.name.equals("MasterSlave")) {
stateModelDef = new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave());
} else if (resource.stateModel.name.equals("OnlineOffline")) {
stateModelDef = new StateModelDefinition(StateModelConfigGenerator.generateConfigForOnlineOffline());
} else if (resource.stateModel.name.equals("LeaderStandby")) {
stateModelDef = new StateModelDefinition(StateModelConfigGenerator.generateConfigForLeaderStandby());
}
if (stateModelDef != null) {
try {
helixAdmin.addStateModelDef(cfg.clusterName, resource.stateModel.name, stateModelDef);
} catch (HelixException e) {
LOG.warn("State model definition " + resource.stateModel.name + " could not be added.");
}
}
}
int partitions = 1;
int replicas = 1;
if (resource.partitions != null) {
if (resource.partitions.containsKey("count")) {
partitions = resource.partitions.get("count");
}
if (resource.partitions.containsKey("replicas")) {
replicas = resource.partitions.get("replicas");
}
}
if (resource.rebalancer == null || !resource.rebalancer.containsKey("mode")) {
throw new HelixException("Rebalance mode is required!");
}
helixAdmin.addResource(cfg.clusterName, resource.name, partitions, resource.stateModel.name, resource.rebalancer.get("mode"));
// user-defined rebalancer
if (resource.rebalancer.containsKey("class") && resource.rebalancer.get("mode").equals(RebalanceMode.USER_DEFINED.toString())) {
IdealState idealState = helixAdmin.getResourceIdealState(cfg.clusterName, resource.name);
idealState.setRebalancerClassName(resource.rebalancer.get("class"));
helixAdmin.setResourceIdealState(cfg.clusterName, resource.name, idealState);
}
helixAdmin.rebalance(cfg.clusterName, resource.name, replicas);
}
}
// enable auto join if this option is set
if (cfg.autoJoinAllowed != null && cfg.autoJoinAllowed) {
HelixConfigScope scope = new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(cfg.clusterName).build();
Map<String, String> properties = new HashMap<String, String>();
properties.put(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, cfg.autoJoinAllowed.toString());
helixAdmin.setConfig(scope, properties);
}
return cfg;
}
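A minimal usage sketch for this method, assuming YAMLClusterSetup exposes a constructor taking the ZooKeeper address and that cluster.yaml is a local file describing clusterName, participants, and resources as checked above (the address and file name here are placeholders):
// Hypothetical caller: read a YAML cluster description from disk and apply it.
YAMLClusterSetup setup = new YAMLClusterSetup("localhost:2181");
try (InputStream input = new FileInputStream("cluster.yaml")) {
    YAMLClusterSetup.YAMLClusterConfig cfg = setup.setupCluster(input);
    System.out.println("Set up cluster: " + cfg.clusterName);
}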
Use of org.apache.helix.manager.zk.ZKHelixAdmin in project Helix by Apache.
The class TestHelixConfigAccessor, method testSetNonexistentParticipantConfig.
// HELIX-25: set participant Config should check existence of instance
@Test
public void testSetNonexistentParticipantConfig() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
ZKHelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.addCluster(clusterName, true);
ConfigAccessor configAccessor = new ConfigAccessor(_gZkClient);
HelixConfigScope participantScope = new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT).forCluster(clusterName).forParticipant("localhost_12918").build();
try {
configAccessor.set(participantScope, "participantConfigKey", "participantConfigValue");
Assert.fail("Except fail to set participant-config because participant: localhost_12918 is not added to cluster yet");
} catch (HelixException e) {
// OK
}
admin.addInstance(clusterName, new InstanceConfig("localhost_12918"));
try {
configAccessor.set(participantScope, "participantConfigKey", "participantConfigValue");
} catch (Exception e) {
Assert.fail("Except succeed to set participant-config because participant: localhost_12918 has been added to cluster");
}
String participantConfigValue = configAccessor.get(participantScope, "participantConfigKey");
Assert.assertEquals(participantConfigValue, "participantConfigValue");
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
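By contrast, a cluster-scoped key can be set as soon as the cluster itself exists; a minimal sketch following the same ConfigAccessor pattern, with an illustrative key and value:
// Cluster-scoped configs need no participant to be registered first.
HelixConfigScope clusterScope = new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(clusterName).build();
configAccessor.set(clusterScope, "clusterConfigKey", "clusterConfigValue");
Assert.assertEquals(configAccessor.get(clusterScope, "clusterConfigKey"), "clusterConfigValue");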